| repo_id (string, 5–115 chars) | size (int64, 590–5.01M) | file_path (string, 4–212 chars) | content (string, 590–5.01M chars) |
|---|---|---|---|
| aegean-odyssey/mpmd_marlin_1.1.x | 11,516 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_InputCapture/EWARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
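; The first two words are what the Cortex-M0 core fetches at reset:
; word 0 is the initial SP (sfe(CSTACK), the end address of the CSTACK
; section), and word 1 is the initial PC (the reset handler address).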
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
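; Call SystemInit (which returns), then tail-branch to the IAR runtime
; entry __iar_program_start, which initializes the data segments and
; calls main(). Thumb function addresses carry bit 0 set, so BX stays
; in Thumb state, the only state the Cortex-M0 supports.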
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
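; Every handler below follows the same pattern: PUBWEAK publishes a weak
; symbol whose body branches to itself (an infinite loop), so defining a
; handler with the same name anywhere in the application overrides the stub.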
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
| aegean-odyssey/mpmd_marlin_1.1.x | 11,394 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMOutput/MDK-ARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
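; Stack_Mem reserves Stack_Size (0x400 = 1 KB) of uninitialized RAM; the
; stack grows downward from __initial_sp, the label just past the reservation.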
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
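; __main is the ARM C library entry point: it performs scatter-loading
; (copying initialized data to RAM and zeroing ZI regions) and then calls main().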
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
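; Return the two-region memory layout in the registers the ARM C library
; expects: R0 = heap base, R1 = initial stack pointer (top of stack),
; R2 = heap limit, R3 = stack limit (lowest stack address).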
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
| aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMOutput/SW4STM32/startup_stm32f072xb.s |
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
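/* r1 holds the running byte offset: the loop above copies one word at a
   time from the load image at _sidata (flash) to the run addresses
   _sdata.._edata (SRAM). */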
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
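/* The loop above stores zero to every word from _sbss up to _ebss,
   clearing uninitialized C statics before any C code runs. */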
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| aegean-odyssey/mpmd_marlin_1.1.x | 11,516 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMOutput/EWARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
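; NOROOT sections are kept by the IAR linker only if something references
; them; the numeric argument is the alignment as a power of two
; (CSTACK: 2^3 = 8 bytes, .intvec: 2^2 = 4 bytes).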
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
| aegean-odyssey/mpmd_marlin_1.1.x | 11,394 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMInput/MDK-ARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
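; PRESERVE8 asserts that this code keeps the 8-byte stack alignment the
; AAPCS requires; THUMB selects the Thumb instruction set, the only one
; the Cortex-M0 executes.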
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
| aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMInput/SW4STM32/startup_stm32f072xb.s |
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
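/* Unified syntax, Cortex-M0 target. softvfp selects software floating
   point (the M0 has no FPU); .thumb emits Thumb code, the only encoding
   the M0 runs. */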
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| aegean-odyssey/mpmd_marlin_1.1.x | 11,516 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_PWMInput/EWARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
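; REORDER places each labelled handler in its own section fragment, so
; the linker can keep, drop, or reorder the stubs independently.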
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_DMA/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
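; Illustration only (0x800 is an assumed value, not a recommendation): an
; application with deeper call chains or larger locals could reserve 2 KB
; instead, e.g.
;   Stack_Size      EQU     0x800
; Heap_Size below can be tailored the same way.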
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
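; Added note: per the ARM C library's two-region memory model, this routine
; returns R0 = heap base, R1 = initial stack pointer (top of stack),
; R2 = heap limit and R3 = lowest stack address, as loaded below.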
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_DMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
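/* Added note: r1 is the running byte offset into .data; each pass of
   CopyDataInit copies one word from _sidata+r1 (load address in flash) to
   _sdata+r1 (run address in SRAM) until _sdata+r1 reaches _edata. */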
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
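/* Illustrative note (assumes application code not shown in this file):
   because the aliases are weak, a C definition such as
   void TIM2_IRQHandler(void) in any linked object overrides
   Default_Handler for that vector. */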
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_DMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
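;; Note (added for clarity): every handler below is exported PUBWEAK, so a
;; definition with the same name anywhere else in the application (for
;; example a C function void TIM2_IRQHandler(void), named here purely as
;; an illustration) replaces the corresponding infinite-loop stub at link
;; time.
;;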
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_ComplementarySignals/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
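; Illustration only (0x800 is an assumed value, not a recommendation): an
; application with deeper call chains or larger locals could reserve 2 KB
; instead, e.g.
;   Stack_Size      EQU     0x800
; Heap_Size below can be tailored the same way.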
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
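; Added note: per the ARM C library's two-region memory model, this routine
; returns R0 = heap base, R1 = initial stack pointer (top of stack),
; R2 = heap limit and R3 = lowest stack address, as loaded below.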
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_ComplementarySignals/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
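/* Added note: r1 is the running byte offset into .data; each pass of
   CopyDataInit copies one word from _sidata+r1 (load address in flash) to
   _sdata+r1 (run address in SRAM) until _sdata+r1 reaches _edata. */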
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
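/* Illustrative note (assumes application code not shown in this file):
   because the aliases are weak, a C definition such as
   void TIM2_IRQHandler(void) in any linked object overrides
   Default_Handler for that vector. */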
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_ComplementarySignals/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
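;; Note (added for clarity): every handler below is exported PUBWEAK, so a
;; definition with the same name anywhere else in the application (for
;; example a C function void TIM2_IRQHandler(void), named here purely as
;; an illustration) replaces the corresponding infinite-loop stub at link
;; time.
;;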
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_TimeBase/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
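;*******************************************************************************
; All IRQ handlers above are exported [WEAK], so an application replaces a
; default infinite loop simply by defining a function with the same name.
; Illustrative sketch in C (the handler body is assumed, not part of this file):
;
;   void TIM2_IRQHandler(void)
;   {
;       /* clear the TIM2 status flags and service the interrupt */
;   }
;*******************************************************************************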
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
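; Register contract on return (ARM C library two-region memory model):
; R0 = heap base, R1 = initial stack pointer (top of stack),
; R2 = heap limit, R3 = lowest stack address.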
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_TimeBase/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
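/* r1 is the byte offset: each CopyDataInit pass moves one word from
   _sidata + r1 (flash) to _sdata + r1 (SRAM) until _edata is reached */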
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
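/* r2 walks from _sbss up to _ebss, storing one zero word per pass */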
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
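/* Illustrative sketch of such an override in application C code (the handler
   body is assumed, not part of this file):

     void USART2_IRQHandler(void)
     {
         ... read the USART2 status register, service and clear the flags ...
     }
*/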
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/TIM/TIM_TimeBase/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
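;; A PUBWEAK handler is overridden at link time by any application function
;; with the same name; only unhandled interrupts fall through to the
;; branch-to-self defaults below. Illustrative C sketch (body assumed):
;;
;;   void USB_IRQHandler(void)
;;   {
;;       /* service the USB interrupt and clear its pending flags */
;;   }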
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/WWDG/WWDG_Example/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
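; The table holds 16 system exception entries plus 32 device IRQ entries,
; i.e. 48 words (0xC0 bytes); the core fetches the initial SP and PC from
; the first two words at address 0.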
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/WWDG/WWDG_Example/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
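/* The accompanying linker script is expected to place .isr_vector at the
   start of flash (0x08000000), which the STM32F0 aliases to address
   0x00000000 when booting from main flash. */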
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/WWDG/WWDG_Example/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
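; SystemInit runs before any C initialization so the clock tree is configured
; first; __iar_program_start (IAR cstartup) then initializes the data
; segments and calls main().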
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_PVD/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
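; When __MICROLIB is defined, the MicroLIB C library initializes the stack and
; heap from the exported symbols below; otherwise __user_initial_stackheap
; supplies the two-region (separate stack and heap) layout to the full library.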
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_PVD/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
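/* Editorial illustration (not part of the original ST file): the two loops
   above behave roughly like this C, using the linker-script symbols declared
   at the top of the file (the helper name is hypothetical):

       extern uint32_t _sidata, _sdata, _edata, _sbss, _ebss;

       static void init_data_and_bss(void)
       {
           uint32_t *src = &_sidata;
           uint32_t *dst;
           for (dst = &_sdata; dst < &_edata; )  // copy .data image from flash
               *dst++ = *src++;
           for (dst = &_sbss; dst < &_ebss; )    // zero-fill .bss
               *dst++ = 0;
       }
*/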
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
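/* Editorial note: the placement itself comes from the project's linker
   script (not shown in this file); typically the .isr_vector input section
   below is kept first in FLASH, e.g. with KEEP(*(.isr_vector)) as the first
   entry of the first output section, so the table lands at 0x00000000. */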
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
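/* Editorial note: each ".weak NAME" plus ".thumb_set NAME,Default_Handler"
   pair below makes NAME a weak Thumb alias of Default_Handler; such symbols
   show up flagged 'W' in arm-none-eabi-nm output, and a strong definition
   of the same name in any application object quietly takes precedence at
   link time. */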
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (11,516 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_PVD/EWARM/startup_stm32f072xb.s
================================================================================
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
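;
; Editorial note: each handler below is published with PUBWEAK and branches
; to itself, so an unexpected interrupt spins in place where a debugger can
; examine the system state. A strong definition of the same symbol in the
; application (for example a C function named TIM2_IRQHandler) overrides the
; weak stub at link time, and the NOROOT sections allow the IAR linker to
; discard any stubs that remain unreferenced.
;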
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (11,394 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_CurrentConsumption/MDK-ARM/startup_stm32f072xb.s
================================================================================
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
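;
; Note: the <h> and <o> tags in the comments above are uVision Configuration
; Wizard annotations; they expose Stack_Size and Heap_Size as editable fields
; in the Keil IDE and are ignored by the assembler itself.
;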
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (10,877 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_CurrentConsumption/SW4STM32/startup_stm32f072xb.s
================================================================================
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (11,516 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/PWR/PWR_CurrentConsumption/EWARM/startup_stm32f072xb.s
================================================================================
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (11,394 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SimpleConversion/MDK-ARM/startup_stm32f072xb.s
================================================================================
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
================================================================================
aegean-odyssey/mpmd_marlin_1.1.x  (10,877 bytes)
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SimpleConversion/SW4STM32/startup_stm32f072xb.s
================================================================================
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
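/* A minimal sketch of such an override from application code (illustrative
   only; the handler body and CMSIS register names are assumptions, not part
   of this file):

       #include "stm32f0xx.h"

       void TIM2_IRQHandler(void)
       {
           if (TIM2->SR & TIM_SR_UIF) {  // update event pending?
               TIM2->SR = ~TIM_SR_UIF;   // rc_w0 bit: writing 0 clears it
               // application-specific work here
           }
       }

   Because the aliases below are weak, this strong definition replaces the
   Default_Handler loop at link time. */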
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SimpleConversion/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SignalsGeneration/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
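; Illustrative only (the value below is an example, not from this project):
; to give the application a 2 KiB stack, change the constant above, e.g.
;     Stack_Size      EQU     0x800
; The <h>/<o> markers above let the uVision Configuration Wizard edit the
; same value from its GUI.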
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
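; Note (descriptive, per the standard ARM C library contract; not from the
; original file): with __MICROLIB the library sets up stack and heap from
; the exported symbols directly; otherwise __user_initial_stackheap returns
; heap base in R0, stack base in R1, heap limit in R2 and stack limit in R3.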
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SignalsGeneration/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/DAC/DAC_SignalsGeneration/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aerror2/erfly6
| 7,504
|
STM32F0xx/Source/STM32F0xx_Startup.s
|
/*********************************************************************
* SEGGER Microcontroller GmbH *
* The Embedded Experts *
**********************************************************************
* *
* (c) 2014 - 2020 SEGGER Microcontroller GmbH *
* *
* www.segger.com Support: support@segger.com *
* *
**********************************************************************
* *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or *
* without modification, are permitted provided that the following *
* condition is met: *
* *
* - Redistributions of source code must retain the above copyright *
* notice, this condition and the following disclaimer. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
* DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR *
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR *
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; *
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE *
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH *
* DAMAGE. *
* *
**********************************************************************
-------------------------- END-OF-HEADER -----------------------------
File : STM32F0xx_Startup.s
Purpose : Startup and exception handlers for STM32F0xx devices.
Additional information:
Preprocessor Definitions
__NO_SYSTEM_INIT
If defined,
SystemInit is not called.
If not defined,
SystemInit is called.
SystemInit is usually supplied by the CMSIS files.
This file declares a weak implementation as fallback.
__MEMORY_INIT
If defined,
MemoryInit is called after SystemInit.
void MemoryInit(void) can be implemented to enable external
memory controllers.
*/
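//
// Illustrative sketch (everything except the MemoryInit name is an
// assumption, not part of this file): with __MEMORY_INIT defined, the
// application supplies the hook that runs right after SystemInit, e.g. to
// bring up an external memory controller before data is placed there:
//
//     void MemoryInit(void) {
//         // enable clocks and configure the external memory interface here
//     }
//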
.syntax unified
/*********************************************************************
*
* Global functions
*
**********************************************************************
*/
/*********************************************************************
*
* Reset_Handler
*
* Function description
* Exception handler for reset.
* Generic bringup of a Cortex-M system.
*
* Additional information
* The stack pointer is expected to be initialized by hardware,
* i.e. read from vectortable[0].
* For manual initialization add
* ldr R0, =__stack_end__
* mov SP, R0
*/
.global reset_handler
.global Reset_Handler
.equ reset_handler, Reset_Handler
.section .init.Reset_Handler, "ax"
.balign 2
.thumb_func
Reset_Handler:
#ifndef __NO_SYSTEM_INIT
//
// Call SystemInit
//
bl SystemInit
#endif
#ifdef __MEMORY_INIT
//
// Call MemoryInit
//
bl MemoryInit
#endif
#ifdef __VECTORS_IN_RAM
//
// Copy vector table (from Flash) to RAM
//
ldr R0, =__vectors_start__
ldr R1, =__vectors_end__
ldr R2, =__vectors_ram_start__
1:
cmp R0, R1
beq 2f
ldr R3, [R0]
str R3, [R2]
adds R0, R0, #4
adds R2, R2, #4
b 1b
2:
#endif
//
// Call runtime initialization, which calls main().
//
bl _start
//
// A weak-only declaration of SystemInit enables the linker to replace
// bl SystemInit with a NOP when there is no strong definition of SystemInit.
//
.weak SystemInit
//
// Place SystemCoreClockUpdate in .init_array
// to be called after runtime initialization
//
#ifndef __NO_SYSTEM_INIT
.section .init_array, "aw"
.balign 4
.word SystemCoreClockUpdate
#endif
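//
// For context (illustrative, not part of this file): a pointer placed in
// .init_array behaves like the GCC constructor attribute, and the runtime
// walks the array before main() is entered:
//
//     __attribute__((constructor))
//     static void clock_update_ctor(void) { SystemCoreClockUpdate(); }
//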
/*********************************************************************
*
* HardFault_Handler
*
* Function description
* Simple exception handler for HardFault.
* In case of a HardFault caused by BKPT instruction without
* debugger attached, return execution, otherwise stay in loop.
*
* Additional information
* The stack pointer is expected to be initialized by hardware,
* i.e. read from vectortable[0].
* For manual initialization add
* ldr R0, =__stack_end__
* mov SP, R0
*/
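//
// Illustrative use, per the description above (the snippet itself is an
// assumption, not from this file): firmware can then execute
//
//     __asm volatile ("bkpt 0");
//
// without a debugger attached; instead of locking up, execution resumes at
// the instruction following the BKPT.
//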
.weak HardFault_Handler
.section .init.HardFault_Handler, "ax"
.balign 2
.thumb_func
HardFault_Handler:
//
// Check if HardFault is caused by BKPT instruction
//
ldr R1, =0xE000ED2C // Load NVIC_HFSR
ldr R2, [R1]
cmp R2, #0 // Check NVIC_HFSR[31]
hfLoop:
bmi hfLoop // Not set? Stay in HardFault Handler.
//
// Continue execution after BKPT instruction
//
#if defined(__thumb__) && !defined(__thumb2__)
movs R0, #4
mov R1, LR
tst R0, R1 // Check EXC_RETURN in Link register bit 2.
bne Uses_PSP
mrs R0, MSP // Stacking was using MSP.
b Pass_StackPtr
Uses_PSP:
mrs R0, PSP // Stacking was using PSP.
Pass_StackPtr:
#else
tst LR, #4 // Check EXC_RETURN[2] in link register to get the return stack
ite eq
mrseq R0, MSP // Frame stored on MSP
mrsne R0, PSP // Frame stored on PSP
#endif
//
// Reset HardFault Status
//
#if defined(__thumb__) && !defined(__thumb2__)
movs R3, #1
lsls R3, R3, #31
orrs R2, R3
str R2, [R1]
#else
orr R2, R2, #0x80000000
str R2, [R1]
#endif
//
// Adjust return address
//
ldr R1, [R0, #24] // Get stored PC from stack
adds R1, #2 // Adjust PC by 2 to skip current BKPT
str R1, [R0, #24] // Write back adjusted PC to stack
//
bx LR // Return
/*************************** End of file ****************************/
|
aerror2/erfly6
| 7,645
|
STM32F0xx/Source/STM32F072x_Vectors.s
|
/*********************************************************************
* SEGGER Microcontroller GmbH *
* The Embedded Experts *
**********************************************************************
* *
* (c) 2014 - 2020 SEGGER Microcontroller GmbH *
* *
* www.segger.com Support: support@segger.com *
* *
**********************************************************************
* *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or *
* without modification, are permitted provided that the following *
* condition is met: *
* *
* - Redistributions of source code must retain the above copyright *
* notice, this condition and the following disclaimer. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
* DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR *
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR *
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; *
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE *
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH *
* DAMAGE. *
* *
**********************************************************************
-------------------------- END-OF-HEADER -----------------------------
File : STM32F072x_Vectors.s
Purpose : Exception and interrupt vectors for STM32F072x devices.
Additional information:
Preprocessor Definitions
__NO_EXTERNAL_INTERRUPTS
If defined,
the vector table will contain only the internal exceptions
and interrupts.
__OPTIMIZATION_SMALL
If defined,
all weak definitions of interrupt handlers will share the
same implementation.
If not defined,
all weak definitions of interrupt handlers will be defined
with their own implementation.
*/
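// Illustrative build note (invocation assumed, not from this file): the
// switches above are ordinary preprocessor symbols, so they can be set when
// assembling with preprocessing enabled, e.g.
//
//     arm-none-eabi-gcc -c -x assembler-with-cpp -D__OPTIMIZATION_SMALL \
//         STM32F072x_Vectors.s -o STM32F072x_Vectors.o
//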
.syntax unified
/*********************************************************************
*
* Macros
*
**********************************************************************
*/
//
// Directly place a vector (word) in the vector table
//
.macro VECTOR Name=
.section .vectors, "ax"
.code 16
.word \Name
.endm
//
// Declare an exception handler with a weak definition
//
.macro EXC_HANDLER Name=
//
// Insert vector in vector table
//
.section .vectors, "ax"
.word \Name
//
// Insert dummy handler in init section
//
.section .init.\Name, "ax"
.thumb_func
.weak \Name
.balign 2
\Name:
1: b 1b // Endless loop
.endm
//
// Declare an interrupt handler with a weak definition
//
.macro ISR_HANDLER Name=
//
// Insert vector in vector table
//
.section .vectors, "ax"
.word \Name
//
// Insert dummy handler in init section
//
#if defined(__OPTIMIZATION_SMALL)
.section .init, "ax"
.weak \Name
.thumb_set \Name,Dummy_Handler
#else
.section .init.\Name, "ax"
.thumb_func
.weak \Name
.balign 2
\Name:
1: b 1b // Endless loop
#endif
.endm
//
// Place a reserved vector in vector table
//
.macro ISR_RESERVED
.section .vectors, "ax"
.word 0
.endm
//
// Place a reserved vector in vector table
//
.macro ISR_RESERVED_DUMMY
.section .vectors, "ax"
.word Dummy_Handler
.endm
/*********************************************************************
*
* Externals
*
**********************************************************************
*/
.extern __stack_end__
.extern Reset_Handler
.extern HardFault_Handler
/*********************************************************************
*
* Global functions
*
**********************************************************************
*/
/*********************************************************************
*
* Setup of the vector table and weak definition of interrupt handlers
*
*/
.section .vectors, "ax"
.code 16
.balign 512
.global _vectors
_vectors:
//
// Internal exceptions and interrupts
//
VECTOR __stack_end__
VECTOR Reset_Handler
EXC_HANDLER NMI_Handler
VECTOR HardFault_Handler
ISR_RESERVED
ISR_RESERVED
ISR_RESERVED
ISR_RESERVED
ISR_RESERVED
ISR_RESERVED
ISR_RESERVED
EXC_HANDLER SVC_Handler
ISR_RESERVED
ISR_RESERVED
EXC_HANDLER PendSV_Handler
EXC_HANDLER SysTick_Handler
//
// External interrupts
//
#ifndef __NO_EXTERNAL_INTERRUPTS
ISR_HANDLER WWDG_IRQHandler
ISR_HANDLER PVD_IRQHandler
ISR_HANDLER RTC_IRQHandler
ISR_HANDLER FLASH_IRQHandler
ISR_HANDLER RCC_CRS_IRQHandler
ISR_HANDLER EXTI0_1_IRQHandler
ISR_HANDLER EXTI2_3_IRQHandler
ISR_HANDLER EXTI4_15_IRQHandler
ISR_HANDLER TSC_IRQHandler
ISR_HANDLER DMA_CH1_IRQHandler
ISR_HANDLER DMA_CH2_3_IRQHandler
ISR_HANDLER DMA_CH4_5_6_7_IRQHandler
ISR_HANDLER ADC_COMP_IRQHandler
ISR_HANDLER TIM1_BRK_UP_TRG_COM_IRQHandler
ISR_HANDLER TIM1_CC_IRQHandler
ISR_HANDLER TIM2_IRQHandler
ISR_HANDLER TIM3_IRQHandler
ISR_HANDLER TIM6_DAC_IRQHandler
ISR_HANDLER TIM7_IRQHandler
ISR_HANDLER TIM14_IRQHandler
ISR_HANDLER TIM15_IRQHandler
ISR_HANDLER TIM16_IRQHandler
ISR_HANDLER TIM17_IRQHandler
ISR_HANDLER I2C1_IRQHandler
ISR_HANDLER I2C2_IRQHandler
ISR_HANDLER SPI1_IRQHandler
ISR_HANDLER SPI2_IRQHandler
ISR_HANDLER USART1_IRQHandler
ISR_HANDLER USART2_IRQHandler
ISR_HANDLER USART3_4_IRQHandler
ISR_HANDLER CEC_CAN_IRQHandler
ISR_HANDLER USB_IRQHandler
#endif
//
.section .vectors, "ax"
_vectors_end:
/*********************************************************************
*
* Dummy handler to be used for reserved interrupt vectors
* and weak implementation of interrupts.
*
*/
.section .init.Dummy_Handler, "ax"
.thumb_func
.weak Dummy_Handler
.balign 2
Dummy_Handler:
1: b 1b // Endless loop
/*************************** End of file ****************************/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Tamper/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Tamper/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
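/* For reference, a minimal sketch of how these symbols are commonly
   defined in a GNU LD linker script (illustrative only; the project's
   own linker script is authoritative):

     .data : {
       _sdata = .;
       *(.data*)
       _edata = .;
     } >RAM AT>FLASH
     _sidata = LOADADDR(.data);

     .bss : {
       _sbss = .;
       *(.bss*) *(COMMON)
       _ebss = .;
     } >RAM
*/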
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
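/* Roughly equivalent C for the copy/zero loops above (a sketch for
   clarity, not part of the build; copy_and_zero is a hypothetical name):

     #include <stdint.h>

     extern uint32_t _sidata, _sdata, _edata, _sbss, _ebss;

     static void copy_and_zero(void)
     {
         uint32_t *src = &_sidata;
         uint32_t *dst;
         for (dst = &_sdata; dst < &_edata; )   // copy .data flash->SRAM
             *dst++ = *src++;
         for (dst = &_sbss; dst < &_ebss; )     // zero-fill .bss
             *dst++ = 0u;
     }
*/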
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this section to ensure that it ends up at physical
* address 0x00000000.
*
******************************************************************************/
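/* One common way to satisfy this with GNU LD is to emit .isr_vector as
   the first output section in flash (a sketch; section and memory names
   must match the project's linker script):

     .isr_vector : {
       KEEP(*(.isr_vector))
     } >FLASH

   On STM32 parts, flash at 0x08000000 is aliased to 0x00000000 when
   booting from main flash, so this places the table at the address the
   core fetches from at reset. */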
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
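/* For example, an application can take over TIM2 interrupts simply by
   defining a function with the same name (a sketch; the body is
   application-specific):

     void TIM2_IRQHandler(void)
     {
         // clear the interrupt source, then handle the event
     }

   No changes to this startup file are required. */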
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Tamper/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
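; Note: unlike the GCC variant of this file, the data-copy and bss-zero
; steps are not coded here; __iar_program_start (the IAR runtime entry)
; performs segment initialisation before calling main().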
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Alarm/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
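; Note: the <h>/<o>/</h> tags in the stack and heap blocks above are
; uVision Configuration Wizard annotations; they let Stack_Size and
; Heap_Size be edited from the IDE without touching this source.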
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Alarm/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this section to ensure that it ends up at physical
* address 0x00000000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/RTC/RTC_Alarm/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aerror2/erfly6
| 4,468
|
Kinetis_KL/Source/Kinetis_KL_Startup.s
|
/*****************************************************************************
* SEGGER Microcontroller GmbH & Co. KG *
* Solutions for real time microcontroller applications *
*****************************************************************************
* *
* (c) 2017 SEGGER Microcontroller GmbH & Co. KG *
* *
* Internet: www.segger.com Support: support@segger.com *
* *
*****************************************************************************/
/*****************************************************************************
* Preprocessor Definitions *
* ------------------------ *
* NO_STACK_INIT *
* *
* If defined, the stack pointer will not be initialised. *
* *
* NO_SYSTEM_INIT *
* *
* If defined, the SystemInit() function will not be called. By default *
* SystemInit() is called after reset to enable the clocks and memories to *
* be initialised prior to any C startup initialisation. *
* *
* NO_VTOR_CONFIG *
* *
* If defined, the vector table offset register will not be configured. *
* *
* MEMORY_INIT *
* *
* If defined, the MemoryInit() function will be called. By default *
* MemoryInit() is called after SystemInit() to enable an external memory *
* controller. *
* *
* STACK_INIT_VAL *
* *
* If defined, specifies the initial stack pointer value. If undefined, *
* the stack pointer will be initialised to point to the end of the *
* RAM segment. *
* *
* VECTORS_IN_RAM *
* *
* If defined, the exception vectors will be copied from Flash to RAM. *
* *
*****************************************************************************/
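/* These options are ordinary preprocessor symbols, so they are enabled
   from the build command line, e.g. (an illustrative invocation; the
   project's build files are authoritative):

     arm-none-eabi-gcc -c -x assembler-with-cpp \
         -DVECTORS_IN_RAM -DMEMORY_INIT Kinetis_KL_Startup.s
*/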
.syntax unified
.global Reset_Handler
.extern _vectors
.section .init, "ax"
.thumb_func
.equ VTOR_REG, 0xE000ED08
#ifndef STACK_INIT_VAL
#define STACK_INIT_VAL __RAM_segment_end__
#endif
Reset_Handler:
#ifndef NO_STACK_INIT
/* Initialise main stack */
ldr r0, =STACK_INIT_VAL
ldr r1, =0x7
bics r0, r1
mov sp, r0
#endif
#ifndef NO_SYSTEM_INIT
/* Initialise system */
ldr r0, =SystemInit
blx r0
#endif
#ifdef MEMORY_INIT
ldr r0, =MemoryInit
blx r0
#endif
#ifdef VECTORS_IN_RAM
/* Copy exception vectors into RAM */
ldr r0, =__vectors_start__
ldr r1, =__vectors_end__
ldr r2, =__vectors_ram_start__
1:
cmp r0, r1
beq 2f
ldr r3, [r0]
str r3, [r2]
adds r0, r0, #4
adds r2, r2, #4
b 1b
2:
#endif
#ifndef NO_VTOR_CONFIG
/* Configure vector table offset register */
ldr r0, =VTOR_REG
#ifdef VECTORS_IN_RAM
ldr r1, =_vectors_ram
#else
ldr r1, =_vectors
#endif
str r1, [r0]
#endif
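/* CMSIS-style C equivalent of the VTOR setup above (a sketch; assumes a
   Cortex-M0+ device header providing SCB):

     SCB->VTOR = (uint32_t)&_vectors;   // or _vectors_ram when copied
*/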
/* Jump to program start */
b _start
|
aerror2/erfly6
| 8,375
|
Kinetis_KL/Source/MKL16Z4_Vectors.s
|
/*****************************************************************************
* SEGGER Microcontroller GmbH & Co. KG *
* Solutions for real time microcontroller applications *
*****************************************************************************
* *
* (c) 2017 SEGGER Microcontroller GmbH & Co. KG *
* *
* Internet: www.segger.com Support: support@segger.com *
* *
*****************************************************************************/
/*****************************************************************************
* Preprocessor Definitions *
* ------------------------ *
* VECTORS_IN_RAM *
* *
* If defined, an area of RAM large enough to store the vector table *
* will be reserved. *
* *
*****************************************************************************/
.syntax unified
.code 16
.section .init, "ax"
.align 0
/*****************************************************************************
* Default Exception Handlers *
*****************************************************************************/
.thumb_func
.weak NMI_Handler
NMI_Handler:
b .
.thumb_func
.weak HardFault_Handler
HardFault_Handler:
b .
.thumb_func
.weak SVC_Handler
SVC_Handler:
b .
.thumb_func
.weak PendSV_Handler
PendSV_Handler:
b .
.thumb_func
.weak SysTick_Handler
SysTick_Handler:
b .
.thumb_func
Dummy_Handler:
b .
#if defined(__OPTIMIZATION_SMALL)
.weak DMA0_IRQHandler
.thumb_set DMA0_IRQHandler,Dummy_Handler
.weak DMA1_IRQHandler
.thumb_set DMA1_IRQHandler,Dummy_Handler
.weak DMA2_IRQHandler
.thumb_set DMA2_IRQHandler,Dummy_Handler
.weak DMA3_IRQHandler
.thumb_set DMA3_IRQHandler,Dummy_Handler
.weak FTFA_IRQHandler
.thumb_set FTFA_IRQHandler,Dummy_Handler
.weak LVD_LVW_IRQHandler
.thumb_set LVD_LVW_IRQHandler,Dummy_Handler
.weak LLW_IRQHandler
.thumb_set LLW_IRQHandler,Dummy_Handler
.weak I2C0_IRQHandler
.thumb_set I2C0_IRQHandler,Dummy_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Dummy_Handler
.weak SPI0_IRQHandler
.thumb_set SPI0_IRQHandler,Dummy_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Dummy_Handler
.weak UART0_IRQHandler
.thumb_set UART0_IRQHandler,Dummy_Handler
.weak UART1_IRQHandler
.thumb_set UART1_IRQHandler,Dummy_Handler
.weak UART2_IRQHandler
.thumb_set UART2_IRQHandler,Dummy_Handler
.weak ADC0_IRQHandler
.thumb_set ADC0_IRQHandler,Dummy_Handler
.weak CMP0_IRQHandler
.thumb_set CMP0_IRQHandler,Dummy_Handler
.weak TPM0_IRQHandler
.thumb_set TPM0_IRQHandler,Dummy_Handler
.weak TPM1_IRQHandler
.thumb_set TPM1_IRQHandler,Dummy_Handler
.weak TPM2_IRQHandler
.thumb_set TPM2_IRQHandler,Dummy_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Dummy_Handler
.weak RTC_Seconds_IRQHandler
.thumb_set RTC_Seconds_IRQHandler,Dummy_Handler
.weak PIT_IRQHandler
.thumb_set PIT_IRQHandler,Dummy_Handler
.weak I2S0_IRQHandler
.thumb_set I2S0_IRQHandler,Dummy_Handler
.weak DAC0_IRQHandler
.thumb_set DAC0_IRQHandler,Dummy_Handler
.weak TSI0_IRQHandler
.thumb_set TSI0_IRQHandler,Dummy_Handler
.weak LPTimer_IRQHandler
.thumb_set LPTimer_IRQHandler,Dummy_Handler
.weak PORTA_IRQHandler
.thumb_set PORTA_IRQHandler,Dummy_Handler
.weak PORTC_PORTD_IRQHandler
.thumb_set PORTC_PORTD_IRQHandler,Dummy_Handler
#else
.thumb_func
.weak DMA0_IRQHandler
DMA0_IRQHandler:
b .
.thumb_func
.weak DMA1_IRQHandler
DMA1_IRQHandler:
b .
.thumb_func
.weak DMA2_IRQHandler
DMA2_IRQHandler:
b .
.thumb_func
.weak DMA3_IRQHandler
DMA3_IRQHandler:
b .
.thumb_func
.weak FTFA_IRQHandler
FTFA_IRQHandler:
b .
.thumb_func
.weak LVD_LVW_IRQHandler
LVD_LVW_IRQHandler:
b .
.thumb_func
.weak LLW_IRQHandler
LLW_IRQHandler:
b .
.thumb_func
.weak I2C0_IRQHandler
I2C0_IRQHandler:
b .
.thumb_func
.weak I2C1_IRQHandler
I2C1_IRQHandler:
b .
.thumb_func
.weak SPI0_IRQHandler
SPI0_IRQHandler:
b .
.thumb_func
.weak SPI1_IRQHandler
SPI1_IRQHandler:
b .
.thumb_func
.weak UART0_IRQHandler
UART0_IRQHandler:
b .
.thumb_func
.weak UART1_IRQHandler
UART1_IRQHandler:
b .
.thumb_func
.weak UART2_IRQHandler
UART2_IRQHandler:
b .
.thumb_func
.weak ADC0_IRQHandler
ADC0_IRQHandler:
b .
.thumb_func
.weak CMP0_IRQHandler
CMP0_IRQHandler:
b .
.thumb_func
.weak TPM0_IRQHandler
TPM0_IRQHandler:
b .
.thumb_func
.weak TPM1_IRQHandler
TPM1_IRQHandler:
b .
.thumb_func
.weak TPM2_IRQHandler
TPM2_IRQHandler:
b .
.thumb_func
.weak RTC_IRQHandler
RTC_IRQHandler:
b .
.thumb_func
.weak RTC_Seconds_IRQHandler
RTC_Seconds_IRQHandler:
b .
.thumb_func
.weak PIT_IRQHandler
PIT_IRQHandler:
b .
.thumb_func
.weak I2S0_IRQHandler
I2S0_IRQHandler:
b .
.thumb_func
.weak DAC0_IRQHandler
DAC0_IRQHandler:
b .
.thumb_func
.weak TSI0_IRQHandler
TSI0_IRQHandler:
b .
.thumb_func
.weak LPTimer_IRQHandler
LPTimer_IRQHandler:
b .
.thumb_func
.weak PORTA_IRQHandler
PORTA_IRQHandler:
b .
.thumb_func
.weak PORTC_PORTD_IRQHandler
PORTC_PORTD_IRQHandler:
b .
#endif
/*****************************************************************************
* Vector Table *
*****************************************************************************/
.section .vectors, "ax"
.align 0
.global _vectors
.extern __stack_end__
.extern Reset_Handler
_vectors:
.word __stack_end__
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SVC_Handler
.word 0 /* Reserved */
.word 0 /* Reserved */
.word PendSV_Handler
.word SysTick_Handler
.word DMA0_IRQHandler
.word DMA1_IRQHandler
.word DMA2_IRQHandler
.word DMA3_IRQHandler
.word Dummy_Handler /* Reserved */
.word FTFA_IRQHandler
.word LVD_LVW_IRQHandler
.word LLW_IRQHandler
.word I2C0_IRQHandler
.word I2C1_IRQHandler
.word SPI0_IRQHandler
.word SPI1_IRQHandler
.word UART0_IRQHandler
.word UART1_IRQHandler
.word UART2_IRQHandler
.word ADC0_IRQHandler
.word CMP0_IRQHandler
.word TPM0_IRQHandler
.word TPM1_IRQHandler
.word TPM2_IRQHandler
.word RTC_IRQHandler
.word RTC_Seconds_IRQHandler
.word PIT_IRQHandler
.word I2S0_IRQHandler
.word Dummy_Handler /* Reserved */
.word DAC0_IRQHandler
.word TSI0_IRQHandler
.word Dummy_Handler /* Reserved */
.word LPTimer_IRQHandler
.word Dummy_Handler /* Reserved */
.word PORTA_IRQHandler
.word PORTC_PORTD_IRQHandler
_vectors_end:
#if 1
// .section .vectors, "ax"
.section .cfm, "ax"
// fill to 0x400 for the flash configuration field
//.fill 0x400-(_vectors_end-_vectors), 1, 0xff
//.org 0x400, 0xFF
BackDoorKey:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
#if defined(E_SERIES)
RESERVED:
.byte 0xff, 0xff, 0xff, 0xff
EEPROT:
.byte 0xff
FPROT:
.byte 0xff
FSEC:
.byte 0xfe
FOPT:
.byte 0xff
#else
FPROT:
.byte 0xff, 0xff, 0xff, 0xff
FSEC:
.byte 0xfe
FOPT:
#if defined(MKL03Z4) || defined(MKL17Z4) || defined(MKL17Z644) || defined(MKL27Z4) || defined(MKL27Z644) || defined(MKL33Z4) || defined(MKL33Z644) || defined(MKL43Z4)
.byte 0x3b
#else
.byte 0xff
#endif
FEPROT:
.byte 0xff
FDPROT:
.byte 0xff
#endif
#endif
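/* Note: on Kinetis KL parts the 16 bytes at flash offset 0x400 form the
   Flash Configuration Field, read by the boot logic at reset. FSEC =
   0xfe leaves the device unsecured; other values can enable flash
   security and lock out the debugger, so edit these bytes with care.
   (A summary from the reference manuals; consult the device manual for
   the exact bit definitions.) */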
#ifdef VECTORS_IN_RAM
.section .vectors_ram, "ax"
.align 0
.global _vectors_ram
_vectors_ram:
.space _vectors_end - _vectors, 0
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComPolling/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
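                ; Configure clocks and flash latency via SystemInit before
                ; any C-library startup code runs, then jump to __main,
                ; which initializes RAM and eventually calls main().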
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
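                ; ARM two-region memory model: return R0 = heap base,
                ; R1 = stack base (top of stack), R2 = heap limit,
                ; R3 = stack limit (bottom of stack).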
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComPolling/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
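/* r1 is the running byte offset: each pass loads a word from _sidata
   (flash) and stores it at _sdata + offset until _edata is reached. */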
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
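/* r2 walks from _sbss to _ebss, storing one zero word per pass. */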
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComPolling/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
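;; Each handler below is declared PUBWEAK, so an application-defined
;; function of the same name overrides it; otherwise the handler
;; branches to itself, halting in place for a debugger to inspect.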
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
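        ; Run SystemInit first (clock and flash configuration), then hand
        ; off to the IAR runtime entry __iar_program_start, which
        ; initializes data/bss and calls main().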
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_AdvComIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_AdvComIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_AdvComIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 88,430
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/aes_xts_asm.S
|
/* aes_xts_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_AES_XTS
#ifdef WOLFSSL_X86_64_BUILD
#ifndef __APPLE__
.text
.globl AES_XTS_init_aesni
.type AES_XTS_init_aesni,@function
.align 16
AES_XTS_init_aesni:
#else
.section __TEXT,__text
.globl _AES_XTS_init_aesni
.p2align 4
_AES_XTS_init_aesni:
#endif /* __APPLE__ */
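        # Register use, inferred from the System V AMD64 convention:
        # %rdi = 16-byte tweak buffer (encrypted in place), %rsi =
        # tweak-key schedule, %edx = AES round count. The cmpl $11/$13
        # tests below select 10, 12 or 14 rounds for AES-128/192/256.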
movdqu (%rdi), %xmm0
# aes_enc_block
pxor (%rsi), %xmm0
movdqu 16(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%rsi), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%rsi), %xmm2
aesenc %xmm2, %xmm0
cmpl $11, %edx
movdqu 160(%rsi), %xmm2
jl L_AES_XTS_init_aesni_tweak_aes_enc_block_last
aesenc %xmm2, %xmm0
movdqu 176(%rsi), %xmm3
aesenc %xmm3, %xmm0
cmpl $13, %edx
movdqu 192(%rsi), %xmm2
jl L_AES_XTS_init_aesni_tweak_aes_enc_block_last
aesenc %xmm2, %xmm0
movdqu 208(%rsi), %xmm3
aesenc %xmm3, %xmm0
movdqu 224(%rsi), %xmm2
L_AES_XTS_init_aesni_tweak_aes_enc_block_last:
aesenclast %xmm2, %xmm0
movdqu %xmm0, (%rdi)
repz retq
#ifndef __APPLE__
.size AES_XTS_init_aesni,.-AES_XTS_init_aesni
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_aes_xts_gc_xts:
.long 0x87,0x1,0x1,0x1
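# XTS tweak-doubling mask: lane 0 holds 0x87, the low byte of the
# GF(2^128) reduction polynomial x^128 + x^7 + x^2 + x + 1; lanes 1-3
# hold 1 so the bit shifted out of each 32-bit word carries into the next.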
#ifndef __APPLE__
.text
.globl AES_XTS_encrypt_aesni
.type AES_XTS_encrypt_aesni,@function
.align 16
AES_XTS_encrypt_aesni:
#else
.section __TEXT,__text
.globl _AES_XTS_encrypt_aesni
.p2align 4
_AES_XTS_encrypt_aesni:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rax
movq %rcx, %r12
movl 24(%rsp), %r10d
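        # The seventh argument (AES round count) arrives on the stack;
        # after the two pushes above it sits at 24(%rsp).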
subq $0x40, %rsp
movdqu L_aes_xts_gc_xts(%rip), %xmm12
movdqu (%r12), %xmm0
# aes_enc_block
pxor (%r9), %xmm0
movdqu 16(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 32(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 48(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 64(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 80(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 96(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 112(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 128(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 144(%r9), %xmm5
aesenc %xmm5, %xmm0
cmpl $11, %r10d
movdqu 160(%r9), %xmm5
jl L_AES_XTS_encrypt_aesni_tweak_aes_enc_block_last
aesenc %xmm5, %xmm0
movdqu 176(%r9), %xmm6
aesenc %xmm6, %xmm0
cmpl $13, %r10d
movdqu 192(%r9), %xmm5
jl L_AES_XTS_encrypt_aesni_tweak_aes_enc_block_last
aesenc %xmm5, %xmm0
movdqu 208(%r9), %xmm6
aesenc %xmm6, %xmm0
movdqu 224(%r9), %xmm5
L_AES_XTS_encrypt_aesni_tweak_aes_enc_block_last:
aesenclast %xmm5, %xmm0
xorl %r13d, %r13d
cmpl $0x40, %eax
movl %eax, %r11d
jl L_AES_XTS_encrypt_aesni_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_encrypt_aesni_enc_64:
# 64 bytes of input
# aes_enc_64
leaq (%rdi,%r13,1), %rcx
leaq (%rsi,%r13,1), %rdx
movdqu (%rcx), %xmm8
movdqu 16(%rcx), %xmm9
movdqu 32(%rcx), %xmm10
movdqu 48(%rcx), %xmm11
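        # Derive the tweaks for blocks 1-3 from block 0's tweak in %xmm0:
        # each step doubles the previous tweak in GF(2^128), i.e. a 1-bit
        # left shift of the 128-bit value with the carry-out reduced by
        # XORing 0x87 into the low byte (applied via the mask in %xmm12).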
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm1
psrad $31, %xmm4
pslld $0x01, %xmm1
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm4
movdqa %xmm1, %xmm2
psrad $31, %xmm4
pslld $0x01, %xmm2
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm2
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm3
psrad $31, %xmm4
pslld $0x01, %xmm3
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm3
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
# aes_enc_block
movdqu (%r8), %xmm4
pxor %xmm4, %xmm8
pxor %xmm4, %xmm9
pxor %xmm4, %xmm10
pxor %xmm4, %xmm11
movdqu 16(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 32(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 48(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 64(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 80(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 96(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 112(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 128(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 144(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
cmpl $11, %r10d
movdqu 160(%r8), %xmm4
jl L_AES_XTS_encrypt_aesni_aes_enc_64_aes_enc_block_last
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 176(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
cmpl $13, %r10d
movdqu 192(%r8), %xmm4
jl L_AES_XTS_encrypt_aesni_aes_enc_64_aes_enc_block_last
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 208(%r8), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 224(%r8), %xmm4
L_AES_XTS_encrypt_aesni_aes_enc_64_aes_enc_block_last:
aesenclast %xmm4, %xmm8
aesenclast %xmm4, %xmm9
aesenclast %xmm4, %xmm10
aesenclast %xmm4, %xmm11
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
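        # Advance the running tweak for the next 64-byte chunk by doubling
        # the fourth tweak (%xmm3) once more into %xmm0.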
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm0
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $0x40, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_encrypt_aesni_enc_64
L_AES_XTS_encrypt_aesni_done_64:
cmpl %eax, %r13d
movl %eax, %r11d
je L_AES_XTS_encrypt_aesni_done_enc
subl %r13d, %r11d
cmpl $16, %r11d
movl %eax, %r11d
jl L_AES_XTS_encrypt_aesni_last_15
andl $0xfffffff0, %r11d
# 16 bytes of input
L_AES_XTS_encrypt_aesni_enc_16:
leaq (%rdi,%r13,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm0, %xmm8
# aes_enc_block
pxor (%r8), %xmm8
movdqu 16(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 32(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 48(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 64(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 80(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 96(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 112(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 128(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 144(%r8), %xmm5
aesenc %xmm5, %xmm8
cmpl $11, %r10d
movdqu 160(%r8), %xmm5
jl L_AES_XTS_encrypt_aesni_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 176(%r8), %xmm6
aesenc %xmm6, %xmm8
cmpl $13, %r10d
movdqu 192(%r8), %xmm5
jl L_AES_XTS_encrypt_aesni_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 208(%r8), %xmm6
aesenc %xmm6, %xmm8
movdqu 224(%r8), %xmm5
L_AES_XTS_encrypt_aesni_aes_enc_block_last:
aesenclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r13,1), %rcx
movdqu %xmm8, (%rcx)
movdqa %xmm0, %xmm4
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $16, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_encrypt_aesni_enc_16
cmpl %eax, %r13d
je L_AES_XTS_encrypt_aesni_done_enc
L_AES_XTS_encrypt_aesni_last_15:
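        # Ciphertext stealing for a trailing partial block: reload the last
        # full ciphertext block into a stack buffer, emit its leading bytes
        # as the final partial output, splice the remaining plaintext bytes
        # into the buffer, then encrypt that buffer once more below.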
subq $16, %r13
leaq (%rsi,%r13,1), %rcx
movdqu (%rcx), %xmm8
addq $16, %r13
movdqu %xmm8, (%rsp)
xorq %rdx, %rdx
L_AES_XTS_encrypt_aesni_last_15_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r13,1), %cl
movb %r11b, (%rsi,%r13,1)
movb %cl, (%rsp,%rdx,1)
incl %r13d
incl %edx
cmpl %eax, %r13d
jl L_AES_XTS_encrypt_aesni_last_15_byte_loop
subq %rdx, %r13
movdqu (%rsp), %xmm8
subq $16, %r13
pxor %xmm0, %xmm8
# aes_enc_block
pxor (%r8), %xmm8
movdqu 16(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 32(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 48(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 64(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 80(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 96(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 112(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 128(%r8), %xmm5
aesenc %xmm5, %xmm8
movdqu 144(%r8), %xmm5
aesenc %xmm5, %xmm8
cmpl $11, %r10d
movdqu 160(%r8), %xmm5
jl L_AES_XTS_encrypt_aesni_last_15_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 176(%r8), %xmm6
aesenc %xmm6, %xmm8
cmpl $13, %r10d
movdqu 192(%r8), %xmm5
jl L_AES_XTS_encrypt_aesni_last_15_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 208(%r8), %xmm6
aesenc %xmm6, %xmm8
movdqu 224(%r8), %xmm5
L_AES_XTS_encrypt_aesni_last_15_aes_enc_block_last:
aesenclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r13,1), %rcx
movdqu %xmm8, (%rcx)
L_AES_XTS_encrypt_aesni_done_enc:
addq $0x40, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_encrypt_aesni,.-AES_XTS_encrypt_aesni
#endif /* __APPLE__ */
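#
# AES_XTS_encrypt_update_aesni: streaming variant. As used below,
# rdi = input, rsi = output, rdx = byte length (copied to rax),
# rcx = key schedule (copied to r10), r8 = 16-byte running tweak
# (loaded on entry, stored back on exit), and r9d = round count
# (evidently 10, 12 or 14, judging by the compares against 11 and 13).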
#ifndef __APPLE__
.text
.globl AES_XTS_encrypt_update_aesni
.type AES_XTS_encrypt_update_aesni,@function
.align 16
AES_XTS_encrypt_update_aesni:
#else
.section __TEXT,__text
.globl _AES_XTS_encrypt_update_aesni
.p2align 4
_AES_XTS_encrypt_update_aesni:
#endif /* __APPLE__ */
pushq %r12
movq %rdx, %rax
movq %rcx, %r10
subq $0x40, %rsp
movdqu L_aes_xts_gc_xts(%rip), %xmm12
movdqu (%r8), %xmm0
xorl %r12d, %r12d
cmpl $0x40, %eax
movl %eax, %r11d
jl L_AES_XTS_encrypt_update_aesni_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_encrypt_update_aesni_enc_64:
# 64 bytes of input
# aes_enc_64
leaq (%rdi,%r12,1), %rcx
leaq (%rsi,%r12,1), %rdx
movdqu (%rcx), %xmm8
movdqu 16(%rcx), %xmm9
movdqu 32(%rcx), %xmm10
movdqu 48(%rcx), %xmm11
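# Derive the three following tweaks (xmm1..xmm3) from xmm0 by repeated
# multiplication by x in GF(2^128), so four blocks can be processed in
# parallel.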
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm1
psrad $31, %xmm4
pslld $0x01, %xmm1
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm4
movdqa %xmm1, %xmm2
psrad $31, %xmm4
pslld $0x01, %xmm2
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm2
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm3
psrad $31, %xmm4
pslld $0x01, %xmm3
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm3
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
# aes_enc_block
movdqu (%r10), %xmm4
pxor %xmm4, %xmm8
pxor %xmm4, %xmm9
pxor %xmm4, %xmm10
pxor %xmm4, %xmm11
movdqu 16(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 32(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 48(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 64(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 80(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 96(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 112(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 128(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 144(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
cmpl $11, %r9d
movdqu 160(%r10), %xmm4
jl L_AES_XTS_encrypt_update_aesni_aes_enc_64_aes_enc_block_last
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 176(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
cmpl $13, %r9d
movdqu 192(%r10), %xmm4
jl L_AES_XTS_encrypt_update_aesni_aes_enc_64_aes_enc_block_last
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 208(%r10), %xmm4
aesenc %xmm4, %xmm8
aesenc %xmm4, %xmm9
aesenc %xmm4, %xmm10
aesenc %xmm4, %xmm11
movdqu 224(%r10), %xmm4
L_AES_XTS_encrypt_update_aesni_aes_enc_64_aes_enc_block_last:
aesenclast %xmm4, %xmm8
aesenclast %xmm4, %xmm9
aesenclast %xmm4, %xmm10
aesenclast %xmm4, %xmm11
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm0
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $0x40, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_encrypt_update_aesni_enc_64
L_AES_XTS_encrypt_update_aesni_done_64:
cmpl %eax, %r12d
movl %eax, %r11d
je L_AES_XTS_encrypt_update_aesni_done_enc
subl %r12d, %r11d
cmpl $16, %r11d
movl %eax, %r11d
jl L_AES_XTS_encrypt_update_aesni_last_15
andl $0xfffffff0, %r11d
# 16 bytes of input
L_AES_XTS_encrypt_update_aesni_enc_16:
leaq (%rdi,%r12,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm0, %xmm8
# aes_enc_block
pxor (%r10), %xmm8
movdqu 16(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 32(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 48(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 64(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 80(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 96(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 112(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 128(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 144(%r10), %xmm5
aesenc %xmm5, %xmm8
cmpl $11, %r9d
movdqu 160(%r10), %xmm5
jl L_AES_XTS_encrypt_update_aesni_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 176(%r10), %xmm6
aesenc %xmm6, %xmm8
cmpl $13, %r9d
movdqu 192(%r10), %xmm5
jl L_AES_XTS_encrypt_update_aesni_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 208(%r10), %xmm6
aesenc %xmm6, %xmm8
movdqu 224(%r10), %xmm5
L_AES_XTS_encrypt_update_aesni_aes_enc_block_last:
aesenclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r12,1), %rcx
movdqu %xmm8, (%rcx)
movdqa %xmm0, %xmm4
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $16, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_encrypt_update_aesni_enc_16
cmpl %eax, %r12d
je L_AES_XTS_encrypt_update_aesni_done_enc
L_AES_XTS_encrypt_update_aesni_last_15:
subq $16, %r12
leaq (%rsi,%r12,1), %rcx
movdqu (%rcx), %xmm8
addq $16, %r12
movdqu %xmm8, (%rsp)
xorq %rdx, %rdx
L_AES_XTS_encrypt_update_aesni_last_15_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r12,1), %cl
movb %r11b, (%rsi,%r12,1)
movb %cl, (%rsp,%rdx,1)
incl %r12d
incl %edx
cmpl %eax, %r12d
jl L_AES_XTS_encrypt_update_aesni_last_15_byte_loop
subq %rdx, %r12
movdqu (%rsp), %xmm8
subq $16, %r12
pxor %xmm0, %xmm8
# aes_enc_block
pxor (%r10), %xmm8
movdqu 16(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 32(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 48(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 64(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 80(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 96(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 112(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 128(%r10), %xmm5
aesenc %xmm5, %xmm8
movdqu 144(%r10), %xmm5
aesenc %xmm5, %xmm8
cmpl $11, %r9d
movdqu 160(%r10), %xmm5
jl L_AES_XTS_encrypt_update_aesni_last_15_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 176(%r10), %xmm6
aesenc %xmm6, %xmm8
cmpl $13, %r9d
movdqu 192(%r10), %xmm5
jl L_AES_XTS_encrypt_update_aesni_last_15_aes_enc_block_last
aesenc %xmm5, %xmm8
movdqu 208(%r10), %xmm6
aesenc %xmm6, %xmm8
movdqu 224(%r10), %xmm5
L_AES_XTS_encrypt_update_aesni_last_15_aes_enc_block_last:
aesenclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r12,1), %rcx
movdqu %xmm8, (%rcx)
L_AES_XTS_encrypt_update_aesni_done_enc:
movdqu %xmm0, (%r8)
addq $0x40, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_encrypt_update_aesni,.-AES_XTS_encrypt_update_aesni
#endif /* __APPLE__ */
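#
# AES_XTS_decrypt_aesni. As used below, rdi = input, rsi = output,
# rdx = byte length (copied to rax), rcx = 16-byte IV (copied to r12),
# r8 = decryption key schedule, r9 = tweak-encryption key schedule,
# and the round count is taken from the stack into r10d. The IV is
# first encrypted with the tweak key to form the initial tweak.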
#ifndef __APPLE__
.text
.globl AES_XTS_decrypt_aesni
.type AES_XTS_decrypt_aesni,@function
.align 16
AES_XTS_decrypt_aesni:
#else
.section __TEXT,__text
.globl _AES_XTS_decrypt_aesni
.p2align 4
_AES_XTS_decrypt_aesni:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rax
movq %rcx, %r12
movl 24(%rsp), %r10d
subq $16, %rsp
movdqu L_aes_xts_gc_xts(%rip), %xmm12
movdqu (%r12), %xmm0
# aes_enc_block
pxor (%r9), %xmm0
movdqu 16(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 32(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 48(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 64(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 80(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 96(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 112(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 128(%r9), %xmm5
aesenc %xmm5, %xmm0
movdqu 144(%r9), %xmm5
aesenc %xmm5, %xmm0
cmpl $11, %r10d
movdqu 160(%r9), %xmm5
jl L_AES_XTS_decrypt_aesni_tweak_aes_enc_block_last
aesenc %xmm5, %xmm0
movdqu 176(%r9), %xmm6
aesenc %xmm6, %xmm0
cmpl $13, %r10d
movdqu 192(%r9), %xmm5
jl L_AES_XTS_decrypt_aesni_tweak_aes_enc_block_last
aesenc %xmm5, %xmm0
movdqu 208(%r9), %xmm6
aesenc %xmm6, %xmm0
movdqu 224(%r9), %xmm5
L_AES_XTS_decrypt_aesni_tweak_aes_enc_block_last:
aesenclast %xmm5, %xmm0
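# Size the bulk loops: when the length is not a multiple of 16, one
# full block is held back so it can be decrypted together with the
# stolen tail in the last_31 path.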
xorl %r13d, %r13d
movl %eax, %r11d
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_aesni_mul16_64
subl $16, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_aesni_last_31_start
L_AES_XTS_decrypt_aesni_mul16_64:
cmpl $0x40, %r11d
jl L_AES_XTS_decrypt_aesni_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_decrypt_aesni_dec_64:
# 64 bytes of input
# aes_dec_64
leaq (%rdi,%r13,1), %rcx
leaq (%rsi,%r13,1), %rdx
movdqu (%rcx), %xmm8
movdqu 16(%rcx), %xmm9
movdqu 32(%rcx), %xmm10
movdqu 48(%rcx), %xmm11
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm1
psrad $31, %xmm4
pslld $0x01, %xmm1
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm4
movdqa %xmm1, %xmm2
psrad $31, %xmm4
pslld $0x01, %xmm2
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm2
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm3
psrad $31, %xmm4
pslld $0x01, %xmm3
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm3
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
# aes_dec_block
movdqu (%r8), %xmm4
pxor %xmm4, %xmm8
pxor %xmm4, %xmm9
pxor %xmm4, %xmm10
pxor %xmm4, %xmm11
movdqu 16(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 32(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 48(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 64(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 80(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 96(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 112(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 128(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 144(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
cmpl $11, %r10d
movdqu 160(%r8), %xmm4
jl L_AES_XTS_decrypt_aesni_aes_dec_64_aes_dec_block_last
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 176(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
cmpl $13, %r10d
movdqu 192(%r8), %xmm4
jl L_AES_XTS_decrypt_aesni_aes_dec_64_aes_dec_block_last
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 208(%r8), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 224(%r8), %xmm4
L_AES_XTS_decrypt_aesni_aes_dec_64_aes_dec_block_last:
aesdeclast %xmm4, %xmm8
aesdeclast %xmm4, %xmm9
aesdeclast %xmm4, %xmm10
aesdeclast %xmm4, %xmm11
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm0
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $0x40, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_decrypt_aesni_dec_64
L_AES_XTS_decrypt_aesni_done_64:
cmpl %eax, %r13d
movl %eax, %r11d
je L_AES_XTS_decrypt_aesni_done_dec
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_aesni_mul16
subl $16, %r11d
subl %r13d, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_aesni_last_31_start
addl %r13d, %r11d
L_AES_XTS_decrypt_aesni_mul16:
L_AES_XTS_decrypt_aesni_dec_16:
# 16 bytes of input
leaq (%rdi,%r13,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm0, %xmm8
# aes_dec_block
pxor (%r8), %xmm8
movdqu 16(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r8), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r10d
movdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r8), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r10d
movdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r8), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_aesni_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r13,1), %rcx
movdqu %xmm8, (%rcx)
movdqa %xmm0, %xmm4
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $16, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_decrypt_aesni_dec_16
cmpl %eax, %r13d
je L_AES_XTS_decrypt_aesni_done_dec
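# XTS ciphertext stealing, decrypt side: the last full ciphertext
# block is decrypted with the *next* tweak (xmm7); its trailing bytes
# are stitched to the short ciphertext tail, and the combined block is
# then decrypted with the current tweak (xmm0).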
L_AES_XTS_decrypt_aesni_last_31_start:
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm7
psrad $31, %xmm4
pslld $0x01, %xmm7
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm7
leaq (%rdi,%r13,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm7, %xmm8
# aes_dec_block
pxor (%r8), %xmm8
movdqu 16(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r8), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r10d
movdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_last_31_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r8), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r10d
movdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_last_31_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r8), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_aesni_last_31_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm7, %xmm8
movdqu %xmm8, (%rsp)
addq $16, %r13
xorq %rdx, %rdx
L_AES_XTS_decrypt_aesni_last_31_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r13,1), %cl
movb %r11b, (%rsi,%r13,1)
movb %cl, (%rsp,%rdx,1)
incl %r13d
incl %edx
cmpl %eax, %r13d
jl L_AES_XTS_decrypt_aesni_last_31_byte_loop
subq %rdx, %r13
movdqu (%rsp), %xmm8
pxor %xmm0, %xmm8
# aes_dec_block
pxor (%r8), %xmm8
movdqu 16(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r8), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r8), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r10d
movdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_last_31_2_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r8), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r10d
movdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_aesni_last_31_2_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r8), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_aesni_last_31_2_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm0, %xmm8
subq $16, %r13
leaq (%rsi,%r13,1), %rcx
movdqu %xmm8, (%rcx)
L_AES_XTS_decrypt_aesni_done_dec:
addq $16, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_decrypt_aesni,.-AES_XTS_decrypt_aesni
#endif /* __APPLE__ */
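#
# AES_XTS_decrypt_update_aesni: streaming variant. As used below,
# rdi = input, rsi = output, rdx = byte length (copied to rax),
# rcx = decryption key schedule (copied to r10), r8 = 16-byte running
# tweak carried between calls, and r9d = round count.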
#ifndef __APPLE__
.text
.globl AES_XTS_decrypt_update_aesni
.type AES_XTS_decrypt_update_aesni,@function
.align 16
AES_XTS_decrypt_update_aesni:
#else
.section __TEXT,__text
.globl _AES_XTS_decrypt_update_aesni
.p2align 4
_AES_XTS_decrypt_update_aesni:
#endif /* __APPLE__ */
pushq %r12
movq %rdx, %rax
movq %rcx, %r10
subq $16, %rsp
movdqu L_aes_xts_gc_xts(%rip), %xmm12
movdqu (%r8), %xmm0
xorl %r12d, %r12d
movl %eax, %r11d
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_update_aesni_mul16_64
subl $16, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_update_aesni_last_31_start
L_AES_XTS_decrypt_update_aesni_mul16_64:
cmpl $0x40, %r11d
jl L_AES_XTS_decrypt_update_aesni_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_decrypt_update_aesni_dec_64:
# 64 bytes of input
# aes_dec_64
leaq (%rdi,%r12,1), %rcx
leaq (%rsi,%r12,1), %rdx
movdqu (%rcx), %xmm8
movdqu 16(%rcx), %xmm9
movdqu 32(%rcx), %xmm10
movdqu 48(%rcx), %xmm11
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm1
psrad $31, %xmm4
pslld $0x01, %xmm1
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm4
movdqa %xmm1, %xmm2
psrad $31, %xmm4
pslld $0x01, %xmm2
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm2
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm3
psrad $31, %xmm4
pslld $0x01, %xmm3
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm3
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
# aes_dec_block
movdqu (%r10), %xmm4
pxor %xmm4, %xmm8
pxor %xmm4, %xmm9
pxor %xmm4, %xmm10
pxor %xmm4, %xmm11
movdqu 16(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 32(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 48(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 64(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 80(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 96(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 112(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 128(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 144(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
cmpl $11, %r9d
movdqu 160(%r10), %xmm4
jl L_AES_XTS_decrypt_update_aesni_aes_dec_64_aes_dec_block_last
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 176(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
cmpl $13, %r9d
movdqu 192(%r10), %xmm4
jl L_AES_XTS_decrypt_update_aesni_aes_dec_64_aes_dec_block_last
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 208(%r10), %xmm4
aesdec %xmm4, %xmm8
aesdec %xmm4, %xmm9
aesdec %xmm4, %xmm10
aesdec %xmm4, %xmm11
movdqu 224(%r10), %xmm4
L_AES_XTS_decrypt_update_aesni_aes_dec_64_aes_dec_block_last:
aesdeclast %xmm4, %xmm8
aesdeclast %xmm4, %xmm9
aesdeclast %xmm4, %xmm10
aesdeclast %xmm4, %xmm11
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm3, %xmm11
movdqu %xmm8, (%rdx)
movdqu %xmm9, 16(%rdx)
movdqu %xmm10, 32(%rdx)
movdqu %xmm11, 48(%rdx)
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm0
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $0x40, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_decrypt_update_aesni_dec_64
L_AES_XTS_decrypt_update_aesni_done_64:
cmpl %eax, %r12d
movl %eax, %r11d
je L_AES_XTS_decrypt_update_aesni_done_dec
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_update_aesni_mul16
subl $16, %r11d
subl %r12d, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_update_aesni_last_31_start
addl %r12d, %r11d
L_AES_XTS_decrypt_update_aesni_mul16:
L_AES_XTS_decrypt_update_aesni_dec_16:
# 16 bytes of input
leaq (%rdi,%r12,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm0, %xmm8
# aes_dec_block
pxor (%r10), %xmm8
movdqu 16(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r10), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r9d
movdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r10), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r9d
movdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r10), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_aesni_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm0, %xmm8
leaq (%rsi,%r12,1), %rcx
movdqu %xmm8, (%rcx)
movdqa %xmm0, %xmm4
psrad $31, %xmm4
pslld $0x01, %xmm0
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm0
addl $16, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_decrypt_update_aesni_dec_16
cmpl %eax, %r12d
je L_AES_XTS_decrypt_update_aesni_done_dec
L_AES_XTS_decrypt_update_aesni_last_31_start:
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm7
psrad $31, %xmm4
pslld $0x01, %xmm7
pshufd $0x93, %xmm4, %xmm4
pand %xmm12, %xmm4
pxor %xmm4, %xmm7
leaq (%rdi,%r12,1), %rcx
movdqu (%rcx), %xmm8
pxor %xmm7, %xmm8
# aes_dec_block
pxor (%r10), %xmm8
movdqu 16(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r10), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r9d
movdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_last_31_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r10), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r9d
movdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_last_31_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r10), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_aesni_last_31_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm7, %xmm8
movdqu %xmm8, (%rsp)
addq $16, %r12
xorq %rdx, %rdx
L_AES_XTS_decrypt_update_aesni_last_31_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r12,1), %cl
movb %r11b, (%rsi,%r12,1)
movb %cl, (%rsp,%rdx,1)
incl %r12d
incl %edx
cmpl %eax, %r12d
jl L_AES_XTS_decrypt_update_aesni_last_31_byte_loop
subq %rdx, %r12
movdqu (%rsp), %xmm8
pxor %xmm0, %xmm8
# aes_dec_block
pxor (%r10), %xmm8
movdqu 16(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 32(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 48(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 64(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 80(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 96(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 112(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 128(%r10), %xmm5
aesdec %xmm5, %xmm8
movdqu 144(%r10), %xmm5
aesdec %xmm5, %xmm8
cmpl $11, %r9d
movdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_last_31_2_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 176(%r10), %xmm6
aesdec %xmm6, %xmm8
cmpl $13, %r9d
movdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_aesni_last_31_2_aes_dec_block_last
aesdec %xmm5, %xmm8
movdqu 208(%r10), %xmm6
aesdec %xmm6, %xmm8
movdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_aesni_last_31_2_aes_dec_block_last:
aesdeclast %xmm5, %xmm8
pxor %xmm0, %xmm8
subq $16, %r12
leaq (%rsi,%r12,1), %rcx
movdqu %xmm8, (%rcx)
L_AES_XTS_decrypt_update_aesni_done_dec:
movdqu %xmm0, (%r8)
addq $16, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_decrypt_update_aesni,.-AES_XTS_decrypt_update_aesni
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX1
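# AVX1 variants: same structure as the AES-NI code above, but in
# VEX-encoded three-operand form, which drops the movdqa register
# copies around the tweak updates.
#
# AES_XTS_init_avx1 encrypts the 16-byte tweak at (%rdi) in place
# using the key schedule at (%rsi), with the round count in edx.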
#ifndef __APPLE__
.text
.globl AES_XTS_init_avx1
.type AES_XTS_init_avx1,@function
.align 16
AES_XTS_init_avx1:
#else
.section __TEXT,__text
.globl _AES_XTS_init_avx1
.p2align 4
_AES_XTS_init_avx1:
#endif /* __APPLE__ */
movl %edx, %eax
vmovdqu (%rdi), %xmm0
# aes_enc_block
vpxor (%rsi), %xmm0, %xmm0
vmovdqu 16(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 32(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 48(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 64(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 80(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 96(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 112(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 128(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 144(%rsi), %xmm2
vaesenc %xmm2, %xmm0, %xmm0
cmpl $11, %eax
vmovdqu 160(%rsi), %xmm2
jl L_AES_XTS_init_avx1_tweak_aes_enc_block_last
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 176(%rsi), %xmm3
vaesenc %xmm3, %xmm0, %xmm0
cmpl $13, %eax
vmovdqu 192(%rsi), %xmm2
jl L_AES_XTS_init_avx1_tweak_aes_enc_block_last
vaesenc %xmm2, %xmm0, %xmm0
vmovdqu 208(%rsi), %xmm3
vaesenc %xmm3, %xmm0, %xmm0
vmovdqu 224(%rsi), %xmm2
L_AES_XTS_init_avx1_tweak_aes_enc_block_last:
vaesenclast %xmm2, %xmm0, %xmm0
vmovdqu %xmm0, (%rdi)
repz retq
#ifndef __APPLE__
.size AES_XTS_init_avx1,.-AES_XTS_init_avx1
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
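# Reduction mask for multiplying the tweak by x in GF(2^128): dword 0
# folds the wrapped carry in as 0x87; dwords 1-3 propagate the
# inter-dword carries as 1.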
L_avx1_aes_xts_gc_xts:
.long 0x87,0x1,0x1,0x1
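#
# AES_XTS_encrypt_avx1. Argument use mirrors the AES-NI decrypt entry
# above: rdi = input, rsi = output, rdx = byte length (rax),
# rcx = IV (r12), r8 = encryption key schedule, r9 = tweak-encryption
# key schedule, round count from the stack into r10d.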
#ifndef __APPLE__
.text
.globl AES_XTS_encrypt_avx1
.type AES_XTS_encrypt_avx1,@function
.align 16
AES_XTS_encrypt_avx1:
#else
.section __TEXT,__text
.globl _AES_XTS_encrypt_avx1
.p2align 4
_AES_XTS_encrypt_avx1:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rax
movq %rcx, %r12
movl 24(%rsp), %r10d
subq $0x40, %rsp
vmovdqu L_avx1_aes_xts_gc_xts(%rip), %xmm12
vmovdqu (%r12), %xmm0
# aes_enc_block
vpxor (%r9), %xmm0, %xmm0
vmovdqu 16(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 32(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 48(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 64(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 80(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 96(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 112(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 128(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 144(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
cmpl $11, %r10d
vmovdqu 160(%r9), %xmm5
jl L_AES_XTS_encrypt_avx1_tweak_aes_enc_block_last
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 176(%r9), %xmm6
vaesenc %xmm6, %xmm0, %xmm0
cmpl $13, %r10d
vmovdqu 192(%r9), %xmm5
jl L_AES_XTS_encrypt_avx1_tweak_aes_enc_block_last
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 208(%r9), %xmm6
vaesenc %xmm6, %xmm0, %xmm0
vmovdqu 224(%r9), %xmm5
L_AES_XTS_encrypt_avx1_tweak_aes_enc_block_last:
vaesenclast %xmm5, %xmm0, %xmm0
xorl %r13d, %r13d
cmpl $0x40, %eax
movl %eax, %r11d
jl L_AES_XTS_encrypt_avx1_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_encrypt_avx1_enc_64:
# 64 bytes of input
# aes_enc_64
leaq (%rdi,%r13,1), %rcx
leaq (%rsi,%r13,1), %rdx
vmovdqu (%rcx), %xmm8
vmovdqu 16(%rcx), %xmm9
vmovdqu 32(%rcx), %xmm10
vmovdqu 48(%rcx), %xmm11
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm1
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm4
vpslld $0x01, %xmm1, %xmm2
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vpsrad $31, %xmm2, %xmm4
vpslld $0x01, %xmm2, %xmm3
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
# aes_enc_block
vmovdqu (%r8), %xmm4
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm4, %xmm9, %xmm9
vpxor %xmm4, %xmm10, %xmm10
vpxor %xmm4, %xmm11, %xmm11
vmovdqu 16(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 32(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 48(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 64(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 80(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 96(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 112(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 128(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 144(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm4
jl L_AES_XTS_encrypt_avx1_aes_enc_64_aes_enc_block_last
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 176(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm4
jl L_AES_XTS_encrypt_avx1_aes_enc_64_aes_enc_block_last
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 208(%r8), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 224(%r8), %xmm4
L_AES_XTS_encrypt_avx1_aes_enc_64_aes_enc_block_last:
vaesenclast %xmm4, %xmm8, %xmm8
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm4, %xmm10, %xmm10
vaesenclast %xmm4, %xmm11, %xmm11
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vpsrad $31, %xmm3, %xmm4
vpslld $0x01, %xmm3, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $0x40, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_encrypt_avx1_enc_64
L_AES_XTS_encrypt_avx1_done_64:
cmpl %eax, %r13d
movl %eax, %r11d
je L_AES_XTS_encrypt_avx1_done_enc
subl %r13d, %r11d
cmpl $16, %r11d
movl %eax, %r11d
jl L_AES_XTS_encrypt_avx1_last_15
andl $0xfffffff0, %r11d
# 16 bytes of input
L_AES_XTS_encrypt_avx1_enc_16:
leaq (%rdi,%r13,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_enc_block
vpxor (%r8), %xmm8, %xmm8
vmovdqu 16(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 32(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 48(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 64(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 80(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 96(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 112(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 128(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 144(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm5
jl L_AES_XTS_encrypt_avx1_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 176(%r8), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm5
jl L_AES_XTS_encrypt_avx1_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 208(%r8), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
vmovdqu 224(%r8), %xmm5
L_AES_XTS_encrypt_avx1_aes_enc_block_last:
vaesenclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r13,1), %rcx
vmovdqu %xmm8, (%rcx)
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $16, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_encrypt_avx1_enc_16
cmpl %eax, %r13d
je L_AES_XTS_encrypt_avx1_done_enc
L_AES_XTS_encrypt_avx1_last_15:
subq $16, %r13
leaq (%rsi,%r13,1), %rcx
vmovdqu (%rcx), %xmm8
addq $16, %r13
vmovdqu %xmm8, (%rsp)
xorq %rdx, %rdx
L_AES_XTS_encrypt_avx1_last_15_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r13,1), %cl
movb %r11b, (%rsi,%r13,1)
movb %cl, (%rsp,%rdx,1)
incl %r13d
incl %edx
cmpl %eax, %r13d
jl L_AES_XTS_encrypt_avx1_last_15_byte_loop
subq %rdx, %r13
vmovdqu (%rsp), %xmm8
subq $16, %r13
vpxor %xmm0, %xmm8, %xmm8
# aes_enc_block
vpxor (%r8), %xmm8, %xmm8
vmovdqu 16(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 32(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 48(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 64(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 80(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 96(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 112(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 128(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 144(%r8), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm5
jl L_AES_XTS_encrypt_avx1_last_15_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 176(%r8), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm5
jl L_AES_XTS_encrypt_avx1_last_15_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 208(%r8), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
vmovdqu 224(%r8), %xmm5
L_AES_XTS_encrypt_avx1_last_15_aes_enc_block_last:
vaesenclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r13,1), %rcx
vmovdqu %xmm8, (%rcx)
L_AES_XTS_encrypt_avx1_done_enc:
addq $0x40, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_encrypt_avx1,.-AES_XTS_encrypt_avx1
#endif /* __APPLE__ */
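#
# AES_XTS_encrypt_update_avx1: AVX1 analogue of
# AES_XTS_encrypt_update_aesni; the running tweak at (%r8) is loaded
# on entry and written back before returning.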
#ifndef __APPLE__
.text
.globl AES_XTS_encrypt_update_avx1
.type AES_XTS_encrypt_update_avx1,@function
.align 16
AES_XTS_encrypt_update_avx1:
#else
.section __TEXT,__text
.globl _AES_XTS_encrypt_update_avx1
.p2align 4
_AES_XTS_encrypt_update_avx1:
#endif /* __APPLE__ */
pushq %r12
movq %rdx, %rax
movq %rcx, %r10
subq $0x40, %rsp
vmovdqu L_avx1_aes_xts_gc_xts(%rip), %xmm12
vmovdqu (%r8), %xmm0
xorl %r12d, %r12d
cmpl $0x40, %eax
movl %eax, %r11d
jl L_AES_XTS_encrypt_update_avx1_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_encrypt_update_avx1_enc_64:
# 64 bytes of input
# aes_enc_64
leaq (%rdi,%r12,1), %rcx
leaq (%rsi,%r12,1), %rdx
vmovdqu (%rcx), %xmm8
vmovdqu 16(%rcx), %xmm9
vmovdqu 32(%rcx), %xmm10
vmovdqu 48(%rcx), %xmm11
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm1
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm4
vpslld $0x01, %xmm1, %xmm2
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vpsrad $31, %xmm2, %xmm4
vpslld $0x01, %xmm2, %xmm3
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
# aes_enc_block
vmovdqu (%r10), %xmm4
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm4, %xmm9, %xmm9
vpxor %xmm4, %xmm10, %xmm10
vpxor %xmm4, %xmm11, %xmm11
vmovdqu 16(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 32(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 48(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 64(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 80(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 96(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 112(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 128(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 144(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm4
jl L_AES_XTS_encrypt_update_avx1_aes_enc_64_aes_enc_block_last
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 176(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm4
jl L_AES_XTS_encrypt_update_avx1_aes_enc_64_aes_enc_block_last
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 208(%r10), %xmm4
vaesenc %xmm4, %xmm8, %xmm8
vaesenc %xmm4, %xmm9, %xmm9
vaesenc %xmm4, %xmm10, %xmm10
vaesenc %xmm4, %xmm11, %xmm11
vmovdqu 224(%r10), %xmm4
L_AES_XTS_encrypt_update_avx1_aes_enc_64_aes_enc_block_last:
vaesenclast %xmm4, %xmm8, %xmm8
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm4, %xmm10, %xmm10
vaesenclast %xmm4, %xmm11, %xmm11
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vpsrad $31, %xmm3, %xmm4
vpslld $0x01, %xmm3, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $0x40, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_encrypt_update_avx1_enc_64
L_AES_XTS_encrypt_update_avx1_done_64:
cmpl %eax, %r12d
movl %eax, %r11d
je L_AES_XTS_encrypt_update_avx1_done_enc
subl %r12d, %r11d
cmpl $16, %r11d
movl %eax, %r11d
jl L_AES_XTS_encrypt_update_avx1_last_15
andl $0xfffffff0, %r11d
# 16 bytes of input
L_AES_XTS_encrypt_update_avx1_enc_16:
leaq (%rdi,%r12,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_enc_block
vpxor (%r10), %xmm8, %xmm8
vmovdqu 16(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 32(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 48(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 64(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 80(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 96(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 112(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 128(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 144(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm5
jl L_AES_XTS_encrypt_update_avx1_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 176(%r10), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm5
jl L_AES_XTS_encrypt_update_avx1_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 208(%r10), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
vmovdqu 224(%r10), %xmm5
L_AES_XTS_encrypt_update_avx1_aes_enc_block_last:
vaesenclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r12,1), %rcx
vmovdqu %xmm8, (%rcx)
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $16, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_encrypt_update_avx1_enc_16
cmpl %eax, %r12d
je L_AES_XTS_encrypt_update_avx1_done_enc
L_AES_XTS_encrypt_update_avx1_last_15:
subq $16, %r12
leaq (%rsi,%r12,1), %rcx
vmovdqu (%rcx), %xmm8
addq $16, %r12
vmovdqu %xmm8, (%rsp)
xorq %rdx, %rdx
L_AES_XTS_encrypt_update_avx1_last_15_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r12,1), %cl
movb %r11b, (%rsi,%r12,1)
movb %cl, (%rsp,%rdx,1)
incl %r12d
incl %edx
cmpl %eax, %r12d
jl L_AES_XTS_encrypt_update_avx1_last_15_byte_loop
subq %rdx, %r12
vmovdqu (%rsp), %xmm8
subq $16, %r12
vpxor %xmm0, %xmm8, %xmm8
# aes_enc_block
vpxor (%r10), %xmm8, %xmm8
vmovdqu 16(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 32(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 48(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 64(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 80(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 96(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 112(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 128(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 144(%r10), %xmm5
vaesenc %xmm5, %xmm8, %xmm8
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm5
jl L_AES_XTS_encrypt_update_avx1_last_15_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 176(%r10), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm5
jl L_AES_XTS_encrypt_update_avx1_last_15_aes_enc_block_last
vaesenc %xmm5, %xmm8, %xmm8
vmovdqu 208(%r10), %xmm6
vaesenc %xmm6, %xmm8, %xmm8
vmovdqu 224(%r10), %xmm5
L_AES_XTS_encrypt_update_avx1_last_15_aes_enc_block_last:
vaesenclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r12,1), %rcx
vmovdqu %xmm8, (%rcx)
L_AES_XTS_encrypt_update_avx1_done_enc:
vmovdqu %xmm0, (%r8)
addq $0x40, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_encrypt_update_avx1,.-AES_XTS_encrypt_update_avx1
#endif /* __APPLE__ */
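#
# AES_XTS_decrypt_avx1: AVX1 analogue of AES_XTS_decrypt_aesni; the
# IV at (%rcx) is first encrypted with the tweak key schedule at (%r9)
# to form the initial tweak, then blocks are decrypted with the
# schedule at (%r8).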
#ifndef __APPLE__
.text
.globl AES_XTS_decrypt_avx1
.type AES_XTS_decrypt_avx1,@function
.align 16
AES_XTS_decrypt_avx1:
#else
.section __TEXT,__text
.globl _AES_XTS_decrypt_avx1
.p2align 4
_AES_XTS_decrypt_avx1:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rax
movq %rcx, %r12
movl 24(%rsp), %r10d
subq $16, %rsp
vmovdqu L_avx1_aes_xts_gc_xts(%rip), %xmm12
vmovdqu (%r12), %xmm0
# aes_enc_block
vpxor (%r9), %xmm0, %xmm0
vmovdqu 16(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 32(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 48(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 64(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 80(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 96(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 112(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 128(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 144(%r9), %xmm5
vaesenc %xmm5, %xmm0, %xmm0
cmpl $11, %r10d
vmovdqu 160(%r9), %xmm5
jl L_AES_XTS_decrypt_avx1_tweak_aes_enc_block_last
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 176(%r9), %xmm6
vaesenc %xmm6, %xmm0, %xmm0
cmpl $13, %r10d
vmovdqu 192(%r9), %xmm5
jl L_AES_XTS_decrypt_avx1_tweak_aes_enc_block_last
vaesenc %xmm5, %xmm0, %xmm0
vmovdqu 208(%r9), %xmm6
vaesenc %xmm6, %xmm0, %xmm0
vmovdqu 224(%r9), %xmm5
L_AES_XTS_decrypt_avx1_tweak_aes_enc_block_last:
vaesenclast %xmm5, %xmm0, %xmm0
xorl %r13d, %r13d
movl %eax, %r11d
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_avx1_mul16_64
subl $16, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_avx1_last_31_start
L_AES_XTS_decrypt_avx1_mul16_64:
cmpl $0x40, %r11d
jl L_AES_XTS_decrypt_avx1_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_decrypt_avx1_dec_64:
# 64 bytes of input
# aes_dec_64
leaq (%rdi,%r13,1), %rcx
leaq (%rsi,%r13,1), %rdx
vmovdqu (%rcx), %xmm8
vmovdqu 16(%rcx), %xmm9
vmovdqu 32(%rcx), %xmm10
vmovdqu 48(%rcx), %xmm11
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm1
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm4
vpslld $0x01, %xmm1, %xmm2
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vpsrad $31, %xmm2, %xmm4
vpslld $0x01, %xmm2, %xmm3
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
# aes_dec_block
vmovdqu (%r8), %xmm4
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm4, %xmm9, %xmm9
vpxor %xmm4, %xmm10, %xmm10
vpxor %xmm4, %xmm11, %xmm11
vmovdqu 16(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 32(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 48(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 64(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 80(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 96(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 112(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 128(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 144(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm4
jl L_AES_XTS_decrypt_avx1_aes_dec_64_aes_dec_block_last
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 176(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm4
jl L_AES_XTS_decrypt_avx1_aes_dec_64_aes_dec_block_last
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 208(%r8), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 224(%r8), %xmm4
L_AES_XTS_decrypt_avx1_aes_dec_64_aes_dec_block_last:
vaesdeclast %xmm4, %xmm8, %xmm8
vaesdeclast %xmm4, %xmm9, %xmm9
vaesdeclast %xmm4, %xmm10, %xmm10
vaesdeclast %xmm4, %xmm11, %xmm11
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vpsrad $31, %xmm3, %xmm4
vpslld $0x01, %xmm3, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $0x40, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_decrypt_avx1_dec_64
L_AES_XTS_decrypt_avx1_done_64:
cmpl %eax, %r13d
movl %eax, %r11d
je L_AES_XTS_decrypt_avx1_done_dec
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_avx1_mul16
subl $16, %r11d
subl %r13d, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_avx1_last_31_start
addl %r13d, %r11d
L_AES_XTS_decrypt_avx1_mul16:
L_AES_XTS_decrypt_avx1_dec_16:
# 16 bytes of input
leaq (%rdi,%r13,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_dec_block
vpxor (%r8), %xmm8, %xmm8
vmovdqu 16(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_avx1_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r13,1), %rcx
vmovdqu %xmm8, (%rcx)
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $16, %r13d
cmpl %r11d, %r13d
jl L_AES_XTS_decrypt_avx1_dec_16
cmpl %eax, %r13d
je L_AES_XTS_decrypt_avx1_done_dec
L_AES_XTS_decrypt_avx1_last_31_start:
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm7
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm7, %xmm7
leaq (%rdi,%r13,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm7, %xmm8, %xmm8
# aes_dec_block
vpxor (%r8), %xmm8, %xmm8
vmovdqu 16(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_last_31_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_last_31_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_avx1_last_31_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm7, %xmm8, %xmm8
vmovdqu %xmm8, (%rsp)
addq $16, %r13
xorq %rdx, %rdx
L_AES_XTS_decrypt_avx1_last_31_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r13,1), %cl
movb %r11b, (%rsi,%r13,1)
movb %cl, (%rsp,%rdx,1)
incl %r13d
incl %edx
cmpl %eax, %r13d
jl L_AES_XTS_decrypt_avx1_last_31_byte_loop
subq %rdx, %r13
vmovdqu (%rsp), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_dec_block
vpxor (%r8), %xmm8, %xmm8
vmovdqu 16(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r8), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r10d
vmovdqu 160(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_last_31_2_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r10d
vmovdqu 192(%r8), %xmm5
jl L_AES_XTS_decrypt_avx1_last_31_2_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r8), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r8), %xmm5
L_AES_XTS_decrypt_avx1_last_31_2_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
subq $16, %r13
leaq (%rsi,%r13,1), %rcx
vmovdqu %xmm8, (%rcx)
L_AES_XTS_decrypt_avx1_done_dec:
addq $16, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_decrypt_avx1,.-AES_XTS_decrypt_avx1
#endif /* __APPLE__ */
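#
# AES_XTS_decrypt_update_avx1: AVX1 analogue of
# AES_XTS_decrypt_update_aesni; carries the running tweak at (%r8)
# across calls and stores it back on exit.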
#ifndef __APPLE__
.text
.globl AES_XTS_decrypt_update_avx1
.type AES_XTS_decrypt_update_avx1,@function
.align 16
AES_XTS_decrypt_update_avx1:
#else
.section __TEXT,__text
.globl _AES_XTS_decrypt_update_avx1
.p2align 4
_AES_XTS_decrypt_update_avx1:
#endif /* __APPLE__ */
pushq %r12
movq %rdx, %rax
movq %rcx, %r10
subq $16, %rsp
vmovdqu L_avx1_aes_xts_gc_xts(%rip), %xmm12
vmovdqu (%r8), %xmm0
xorl %r12d, %r12d
movl %eax, %r11d
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_update_avx1_mul16_64
subl $16, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_update_avx1_last_31_start
L_AES_XTS_decrypt_update_avx1_mul16_64:
cmpl $0x40, %r11d
jl L_AES_XTS_decrypt_update_avx1_done_64
andl $0xffffffc0, %r11d
L_AES_XTS_decrypt_update_avx1_dec_64:
# 64 bytes of input
# aes_dec_64
leaq (%rdi,%r12,1), %rcx
leaq (%rsi,%r12,1), %rdx
vmovdqu (%rcx), %xmm8
vmovdqu 16(%rcx), %xmm9
vmovdqu 32(%rcx), %xmm10
vmovdqu 48(%rcx), %xmm11
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm1
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm4
vpslld $0x01, %xmm1, %xmm2
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vpsrad $31, %xmm2, %xmm4
vpslld $0x01, %xmm2, %xmm3
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
# aes_dec_block
vmovdqu (%r10), %xmm4
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm4, %xmm9, %xmm9
vpxor %xmm4, %xmm10, %xmm10
vpxor %xmm4, %xmm11, %xmm11
vmovdqu 16(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 32(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 48(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 64(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 80(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 96(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 112(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 128(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 144(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm4
jl L_AES_XTS_decrypt_update_avx1_aes_dec_64_aes_dec_block_last
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 176(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm4
jl L_AES_XTS_decrypt_update_avx1_aes_dec_64_aes_dec_block_last
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 208(%r10), %xmm4
vaesdec %xmm4, %xmm8, %xmm8
vaesdec %xmm4, %xmm9, %xmm9
vaesdec %xmm4, %xmm10, %xmm10
vaesdec %xmm4, %xmm11, %xmm11
vmovdqu 224(%r10), %xmm4
L_AES_XTS_decrypt_update_avx1_aes_dec_64_aes_dec_block_last:
vaesdeclast %xmm4, %xmm8, %xmm8
vaesdeclast %xmm4, %xmm9, %xmm9
vaesdeclast %xmm4, %xmm10, %xmm10
vaesdeclast %xmm4, %xmm11, %xmm11
vpxor %xmm0, %xmm8, %xmm8
vpxor %xmm1, %xmm9, %xmm9
vpxor %xmm2, %xmm10, %xmm10
vpxor %xmm3, %xmm11, %xmm11
vmovdqu %xmm8, (%rdx)
vmovdqu %xmm9, 16(%rdx)
vmovdqu %xmm10, 32(%rdx)
vmovdqu %xmm11, 48(%rdx)
vpsrad $31, %xmm3, %xmm4
vpslld $0x01, %xmm3, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $0x40, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_decrypt_update_avx1_dec_64
L_AES_XTS_decrypt_update_avx1_done_64:
cmpl %eax, %r12d
movl %eax, %r11d
je L_AES_XTS_decrypt_update_avx1_done_dec
andl $0xfffffff0, %r11d
cmpl %eax, %r11d
je L_AES_XTS_decrypt_update_avx1_mul16
subl $16, %r11d
subl %r12d, %r11d
cmpl $16, %r11d
jl L_AES_XTS_decrypt_update_avx1_last_31_start
addl %r12d, %r11d
L_AES_XTS_decrypt_update_avx1_mul16:
L_AES_XTS_decrypt_update_avx1_dec_16:
# 16 bytes of input
leaq (%rdi,%r12,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_dec_block
vpxor (%r10), %xmm8, %xmm8
vmovdqu 16(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_avx1_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
leaq (%rsi,%r12,1), %rcx
vmovdqu %xmm8, (%rcx)
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm0
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
addl $16, %r12d
cmpl %r11d, %r12d
jl L_AES_XTS_decrypt_update_avx1_dec_16
cmpl %eax, %r12d
je L_AES_XTS_decrypt_update_avx1_done_dec
L_AES_XTS_decrypt_update_avx1_last_31_start:
vpsrad $31, %xmm0, %xmm4
vpslld $0x01, %xmm0, %xmm7
vpshufd $0x93, %xmm4, %xmm4
vpand %xmm12, %xmm4, %xmm4
vpxor %xmm4, %xmm7, %xmm7
leaq (%rdi,%r12,1), %rcx
vmovdqu (%rcx), %xmm8
vpxor %xmm7, %xmm8, %xmm8
# aes_dec_block
vpxor (%r10), %xmm8, %xmm8
vmovdqu 16(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_last_31_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_last_31_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_avx1_last_31_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm7, %xmm8, %xmm8
vmovdqu %xmm8, (%rsp)
addq $16, %r12
xorq %rdx, %rdx
L_AES_XTS_decrypt_update_avx1_last_31_byte_loop:
movb (%rsp,%rdx,1), %r11b
movb (%rdi,%r12,1), %cl
movb %r11b, (%rsi,%r12,1)
movb %cl, (%rsp,%rdx,1)
incl %r12d
incl %edx
cmpl %eax, %r12d
jl L_AES_XTS_decrypt_update_avx1_last_31_byte_loop
subq %rdx, %r12
vmovdqu (%rsp), %xmm8
vpxor %xmm0, %xmm8, %xmm8
# aes_dec_block
vpxor (%r10), %xmm8, %xmm8
vmovdqu 16(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 32(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 48(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 64(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 80(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 96(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 112(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 128(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 144(%r10), %xmm5
vaesdec %xmm5, %xmm8, %xmm8
cmpl $11, %r9d
vmovdqu 160(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_last_31_2_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 176(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
cmpl $13, %r9d
vmovdqu 192(%r10), %xmm5
jl L_AES_XTS_decrypt_update_avx1_last_31_2_aes_dec_block_last
vaesdec %xmm5, %xmm8, %xmm8
vmovdqu 208(%r10), %xmm6
vaesdec %xmm6, %xmm8, %xmm8
vmovdqu 224(%r10), %xmm5
L_AES_XTS_decrypt_update_avx1_last_31_2_aes_dec_block_last:
vaesdeclast %xmm5, %xmm8, %xmm8
vpxor %xmm0, %xmm8, %xmm8
subq $16, %r12
leaq (%rsi,%r12,1), %rcx
vmovdqu %xmm8, (%rcx)
L_AES_XTS_decrypt_update_avx1_done_dec:
vmovdqu %xmm0, (%r8)
addq $16, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size AES_XTS_decrypt_update_avx1,.-AES_XTS_decrypt_update_avx1
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX1 */
#endif /* WOLFSSL_X86_64_BUILD */
#endif /* WOLFSSL_AES_XTS */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
aegean-odyssey/mpmd_marlin_1.1.x | 11,445 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartAdvComIT/MDK-ARM/startup_stm32f072xb.s
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
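; Returns the two-region memory model bounds (per the ARM C library
; convention): R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit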
__user_initial_stackheap
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartAdvComIT/SW4STM32/startup_stm32f072xb.s
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
aenu1/aps3e | 61,299 | app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/aes_asm.S
/* aes_asm.S
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* This file is in AT&T asm syntax; see the .asm file for Intel syntax */
/* See the Intel Advanced Encryption Standard (AES) Instructions Set White
 * Paper by Shay Gueron, Intel Mobility Group, Israel Development Center,
 * Israel
 */
#ifdef WOLFSSL_X86_64_BUILD
/*
AES_CBC_encrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
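/* A minimal, hypothetical caller sketch (buffer names are illustrative,
 * not part of this file). The IV and key schedule must be 16-byte aligned
 * here, since they are read with aligned accesses (movdqa, and aesenc with
 * a memory operand); length is in bytes.
 *
 *   static unsigned char ks[15 * 16] __attribute__((aligned(16))); // worst-case schedule size
 *   static unsigned char iv[16]      __attribute__((aligned(16)));
 *   unsigned char in[32], out[32];                          // multiple of 16 bytes
 *   AES_128_Key_Expansion_AESNI(user_key, ks);              // user_key: 16 bytes
 *   AES_CBC_encrypt_AESNI(in, out, iv, sizeof(in), ks, 10); // nr = 10 for AES-128
 */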
#ifndef __APPLE__
.globl AES_CBC_encrypt_AESNI
AES_CBC_encrypt_AESNI:
#else
.globl _AES_CBC_encrypt_AESNI
_AES_CBC_encrypt_AESNI:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - ivec
# parameter 4: %rcx - length
# parameter 5: %r8 - KS
# parameter 6: %r9d - nr
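# rcx = ceil(length / 16): shlq $60 clears ZF only when the low four bits
# of length are non-zero, in which case the partial block bumps the count.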
movq %rcx, %r10
shrq $4, %rcx
shlq $60, %r10
je NO_PARTS
addq $1, %rcx
NO_PARTS:
subq $16, %rsi
movdqa (%rdx), %xmm1
LOOP:
pxor (%rdi), %xmm1
pxor (%r8), %xmm1
addq $16,%rsi
addq $16,%rdi
cmpl $12, %r9d
aesenc 16(%r8),%xmm1
aesenc 32(%r8),%xmm1
aesenc 48(%r8),%xmm1
aesenc 64(%r8),%xmm1
aesenc 80(%r8),%xmm1
aesenc 96(%r8),%xmm1
aesenc 112(%r8),%xmm1
aesenc 128(%r8),%xmm1
aesenc 144(%r8),%xmm1
movdqa 160(%r8),%xmm2
jb LAST
cmpl $14, %r9d
aesenc 160(%r8),%xmm1
aesenc 176(%r8),%xmm1
movdqa 192(%r8),%xmm2
jb LAST
aesenc 192(%r8),%xmm1
aesenc 208(%r8),%xmm1
movdqa 224(%r8),%xmm2
LAST:
decq %rcx
aesenclast %xmm2,%xmm1
movdqu %xmm1,(%rsi)
jne LOOP
ret
#if defined(WOLFSSL_AESNI_BY4)
/*
AES_CBC_decrypt_AESNI_by4 (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_CBC_decrypt_AESNI_by4
AES_CBC_decrypt_AESNI_by4:
#else
.globl _AES_CBC_decrypt_AESNI_by4
_AES_CBC_decrypt_AESNI_by4:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - ivec
# parameter 4: %rcx - length
# parameter 5: %r8 - KS
# parameter 6: %r9d - nr
movq %rcx, %r10
shrq $4, %rcx
shlq $60, %r10
je DNO_PARTS_4
addq $1, %rcx
DNO_PARTS_4:
movq %rcx, %r10
shlq $62, %r10
shrq $62, %r10
shrq $2, %rcx
movdqu (%rdx),%xmm5
je DREMAINDER_4
subq $64, %rsi
DLOOP_4:
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm2
movdqu 32(%rdi), %xmm3
movdqu 48(%rdi), %xmm4
movdqa %xmm1, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm3, %xmm8
movdqa %xmm4, %xmm15
movdqa (%r8), %xmm9
movdqa 16(%r8), %xmm10
movdqa 32(%r8), %xmm11
movdqa 48(%r8), %xmm12
pxor %xmm9, %xmm1
pxor %xmm9, %xmm2
pxor %xmm9, %xmm3
pxor %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
movdqa 64(%r8), %xmm9
movdqa 80(%r8), %xmm10
movdqa 96(%r8), %xmm11
movdqa 112(%r8), %xmm12
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
movdqa 128(%r8), %xmm9
movdqa 144(%r8), %xmm10
movdqa 160(%r8), %xmm11
cmpl $12, %r9d
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
jb DLAST_4
movdqa 160(%r8), %xmm9
movdqa 176(%r8), %xmm10
movdqa 192(%r8), %xmm11
cmpl $14, %r9d
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
jb DLAST_4
movdqa 192(%r8), %xmm9
movdqa 208(%r8), %xmm10
movdqa 224(%r8), %xmm11
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
DLAST_4:
addq $64, %rdi
addq $64, %rsi
decq %rcx
aesdeclast %xmm11, %xmm1
aesdeclast %xmm11, %xmm2
aesdeclast %xmm11, %xmm3
aesdeclast %xmm11, %xmm4
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
pxor %xmm7, %xmm3
pxor %xmm8, %xmm4
movdqu %xmm1, (%rsi)
movdqu %xmm2, 16(%rsi)
movdqu %xmm3, 32(%rsi)
movdqu %xmm4, 48(%rsi)
movdqa %xmm15,%xmm5
jne DLOOP_4
addq $64, %rsi
DREMAINDER_4:
cmpq $0, %r10
je DEND_4
DLOOP_4_2:
movdqu (%rdi), %xmm1
movdqa %xmm1, %xmm15
addq $16, %rdi
pxor (%r8), %xmm1
movdqu 160(%r8), %xmm2
cmpl $12, %r9d
aesdec 16(%r8), %xmm1
aesdec 32(%r8), %xmm1
aesdec 48(%r8), %xmm1
aesdec 64(%r8), %xmm1
aesdec 80(%r8), %xmm1
aesdec 96(%r8), %xmm1
aesdec 112(%r8), %xmm1
aesdec 128(%r8), %xmm1
aesdec 144(%r8), %xmm1
jb DLAST_4_2
movdqu 192(%r8), %xmm2
cmpl $14, %r9d
aesdec 160(%r8), %xmm1
aesdec 176(%r8), %xmm1
jb DLAST_4_2
movdqu 224(%r8), %xmm2
aesdec 192(%r8), %xmm1
aesdec 208(%r8), %xmm1
DLAST_4_2:
aesdeclast %xmm2, %xmm1
pxor %xmm5, %xmm1
movdqa %xmm15, %xmm5
movdqu %xmm1, (%rsi)
addq $16, %rsi
decq %r10
jne DLOOP_4_2
DEND_4:
ret
#elif defined(WOLFSSL_AESNI_BY6)
/*
AES_CBC_decrypt_AESNI_by6 (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_CBC_decrypt_AESNI_by6
AES_CBC_decrypt_AESNI_by6:
#else
.globl _AES_CBC_decrypt_AESNI_by6
_AES_CBC_decrypt_AESNI_by6:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - ivec
# parameter 4: %rcx - length
# parameter 5: %r8 - KS
# parameter 6: %r9d - nr
movq %rcx, %r10
shrq $4, %rcx
shlq $60, %r10
je DNO_PARTS_6
addq $1, %rcx
DNO_PARTS_6:
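# Split the block count for the 6-wide loop: rcx = blocks / 6 and
# r10 = blocks % 6 via div (rax/rdx/rbx are stashed in r12-r14, which
# are clobbered in the process).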
movq %rax, %r12
movq %rdx, %r13
movq %rbx, %r14
movq $0, %rdx
movq %rcx, %rax
movq $6, %rbx
div %rbx
movq %rax, %rcx
movq %rdx, %r10
movq %r12, %rax
movq %r13, %rdx
movq %r14, %rbx
cmpq $0, %rcx
movdqu (%rdx), %xmm7
je DREMAINDER_6
subq $96, %rsi
DLOOP_6:
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm2
movdqu 32(%rdi), %xmm3
movdqu 48(%rdi), %xmm4
movdqu 64(%rdi), %xmm5
movdqu 80(%rdi), %xmm6
movdqa (%r8), %xmm8
movdqa 16(%r8), %xmm9
movdqa 32(%r8), %xmm10
movdqa 48(%r8), %xmm11
pxor %xmm8, %xmm1
pxor %xmm8, %xmm2
pxor %xmm8, %xmm3
pxor %xmm8, %xmm4
pxor %xmm8, %xmm5
pxor %xmm8, %xmm6
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm9, %xmm5
aesdec %xmm9, %xmm6
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
movdqa 64(%r8), %xmm8
movdqa 80(%r8), %xmm9
movdqa 96(%r8), %xmm10
movdqa 112(%r8), %xmm11
aesdec %xmm8, %xmm1
aesdec %xmm8, %xmm2
aesdec %xmm8, %xmm3
aesdec %xmm8, %xmm4
aesdec %xmm8, %xmm5
aesdec %xmm8, %xmm6
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm9, %xmm5
aesdec %xmm9, %xmm6
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
movdqa 128(%r8), %xmm8
movdqa 144(%r8), %xmm9
movdqa 160(%r8), %xmm10
cmpl $12, %r9d
aesdec %xmm8, %xmm1
aesdec %xmm8, %xmm2
aesdec %xmm8, %xmm3
aesdec %xmm8, %xmm4
aesdec %xmm8, %xmm5
aesdec %xmm8, %xmm6
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm9, %xmm5
aesdec %xmm9, %xmm6
jb DLAST_6
movdqa 160(%r8), %xmm8
movdqa 176(%r8), %xmm9
movdqa 192(%r8), %xmm10
cmpl $14, %r9d
aesdec %xmm8, %xmm1
aesdec %xmm8, %xmm2
aesdec %xmm8, %xmm3
aesdec %xmm8, %xmm4
aesdec %xmm8, %xmm5
aesdec %xmm8, %xmm6
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm9, %xmm5
aesdec %xmm9, %xmm6
jb DLAST_6
movdqa 192(%r8), %xmm8
movdqa 208(%r8), %xmm9
movdqa 224(%r8), %xmm10
aesdec %xmm8, %xmm1
aesdec %xmm8, %xmm2
aesdec %xmm8, %xmm3
aesdec %xmm8, %xmm4
aesdec %xmm8, %xmm5
aesdec %xmm8, %xmm6
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm9, %xmm5
aesdec %xmm9, %xmm6
DLAST_6:
addq $96, %rsi
aesdeclast %xmm10, %xmm1
aesdeclast %xmm10, %xmm2
aesdeclast %xmm10, %xmm3
aesdeclast %xmm10, %xmm4
aesdeclast %xmm10, %xmm5
aesdeclast %xmm10, %xmm6
movdqu (%rdi), %xmm8
movdqu 16(%rdi), %xmm9
movdqu 32(%rdi), %xmm10
movdqu 48(%rdi), %xmm11
movdqu 64(%rdi), %xmm12
movdqu 80(%rdi), %xmm13
pxor %xmm7, %xmm1
pxor %xmm8, %xmm2
pxor %xmm9, %xmm3
pxor %xmm10, %xmm4
pxor %xmm11, %xmm5
pxor %xmm12, %xmm6
movdqu %xmm13, %xmm7
movdqu %xmm1, (%rsi)
movdqu %xmm2, 16(%rsi)
movdqu %xmm3, 32(%rsi)
movdqu %xmm4, 48(%rsi)
movdqu %xmm5, 64(%rsi)
movdqu %xmm6, 80(%rsi)
addq $96, %rdi
decq %rcx
jne DLOOP_6
addq $96, %rsi
DREMAINDER_6:
cmpq $0, %r10
je DEND_6
DLOOP_6_2:
movdqu (%rdi), %xmm1
movdqa %xmm1, %xmm10
addq $16, %rdi
pxor (%r8), %xmm1
movdqu 160(%r8), %xmm2
cmpl $12, %r9d
aesdec 16(%r8), %xmm1
aesdec 32(%r8), %xmm1
aesdec 48(%r8), %xmm1
aesdec 64(%r8), %xmm1
aesdec 80(%r8), %xmm1
aesdec 96(%r8), %xmm1
aesdec 112(%r8), %xmm1
aesdec 128(%r8), %xmm1
aesdec 144(%r8), %xmm1
jb DLAST_6_2
movdqu 192(%r8), %xmm2
cmpl $14, %r9d
aesdec 160(%r8), %xmm1
aesdec 176(%r8), %xmm1
jb DLAST_6_2
movdqu 224(%r8), %xmm2
aesdec 192(%r8), %xmm1
aesdec 208(%r8), %xmm1
DLAST_6_2:
aesdeclast %xmm2, %xmm1
pxor %xmm7, %xmm1
movdqa %xmm10, %xmm7
movdqu %xmm1, (%rsi)
addq $16, %rsi
decq %r10
jne DLOOP_6_2
DEND_6:
ret
#else /* WOLFSSL_AESNI_BYx */
/*
AES_CBC_decrypt_AESNI_by8 (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_CBC_decrypt_AESNI_by8
AES_CBC_decrypt_AESNI_by8:
#else
.globl _AES_CBC_decrypt_AESNI_by8
_AES_CBC_decrypt_AESNI_by8:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - ivec
# parameter 4: %rcx - length
# parameter 5: %r8 - KS
# parameter 6: %r9d - nr
movq %rcx, %r10
shrq $4, %rcx
shlq $60, %r10
je DNO_PARTS_8
addq $1, %rcx
DNO_PARTS_8:
movq %rcx, %r10
shlq $61, %r10
shrq $61, %r10
shrq $3, %rcx
movdqu (%rdx), %xmm9
je DREMAINDER_8
subq $128, %rsi
DLOOP_8:
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm2
movdqu 32(%rdi), %xmm3
movdqu 48(%rdi), %xmm4
movdqu 64(%rdi), %xmm5
movdqu 80(%rdi), %xmm6
movdqu 96(%rdi), %xmm7
movdqu 112(%rdi), %xmm8
movdqa (%r8), %xmm10
movdqa 16(%r8), %xmm11
movdqa 32(%r8), %xmm12
movdqa 48(%r8), %xmm13
pxor %xmm10, %xmm1
pxor %xmm10, %xmm2
pxor %xmm10, %xmm3
pxor %xmm10, %xmm4
pxor %xmm10, %xmm5
pxor %xmm10, %xmm6
pxor %xmm10, %xmm7
pxor %xmm10, %xmm8
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
aesdec %xmm11, %xmm7
aesdec %xmm11, %xmm8
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
aesdec %xmm12, %xmm5
aesdec %xmm12, %xmm6
aesdec %xmm12, %xmm7
aesdec %xmm12, %xmm8
aesdec %xmm13, %xmm1
aesdec %xmm13, %xmm2
aesdec %xmm13, %xmm3
aesdec %xmm13, %xmm4
aesdec %xmm13, %xmm5
aesdec %xmm13, %xmm6
aesdec %xmm13, %xmm7
aesdec %xmm13, %xmm8
movdqa 64(%r8), %xmm10
movdqa 80(%r8), %xmm11
movdqa 96(%r8), %xmm12
movdqa 112(%r8), %xmm13
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm10, %xmm7
aesdec %xmm10, %xmm8
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
aesdec %xmm11, %xmm7
aesdec %xmm11, %xmm8
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
aesdec %xmm12, %xmm5
aesdec %xmm12, %xmm6
aesdec %xmm12, %xmm7
aesdec %xmm12, %xmm8
aesdec %xmm13, %xmm1
aesdec %xmm13, %xmm2
aesdec %xmm13, %xmm3
aesdec %xmm13, %xmm4
aesdec %xmm13, %xmm5
aesdec %xmm13, %xmm6
aesdec %xmm13, %xmm7
aesdec %xmm13, %xmm8
movdqa 128(%r8), %xmm10
movdqa 144(%r8), %xmm11
movdqa 160(%r8), %xmm12
cmpl $12, %r9d
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm10, %xmm7
aesdec %xmm10, %xmm8
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
aesdec %xmm11, %xmm7
aesdec %xmm11, %xmm8
jb DLAST_8
movdqa 160(%r8), %xmm10
movdqa 176(%r8), %xmm11
movdqa 192(%r8), %xmm12
cmpl $14, %r9d
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm10, %xmm7
aesdec %xmm10, %xmm8
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
aesdec %xmm11, %xmm7
aesdec %xmm11, %xmm8
jb DLAST_8
movdqa 192(%r8), %xmm10
movdqa 208(%r8), %xmm11
movdqa 224(%r8), %xmm12
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm10, %xmm5
aesdec %xmm10, %xmm6
aesdec %xmm10, %xmm7
aesdec %xmm10, %xmm8
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm11, %xmm5
aesdec %xmm11, %xmm6
aesdec %xmm11, %xmm7
aesdec %xmm11, %xmm8
DLAST_8:
addq $128, %rsi
aesdeclast %xmm12, %xmm1
aesdeclast %xmm12, %xmm2
aesdeclast %xmm12, %xmm3
aesdeclast %xmm12, %xmm4
aesdeclast %xmm12, %xmm5
aesdeclast %xmm12, %xmm6
aesdeclast %xmm12, %xmm7
aesdeclast %xmm12, %xmm8
movdqu (%rdi), %xmm10
movdqu 16(%rdi), %xmm11
movdqu 32(%rdi), %xmm12
movdqu 48(%rdi), %xmm13
pxor %xmm9, %xmm1
pxor %xmm10, %xmm2
pxor %xmm11, %xmm3
pxor %xmm12, %xmm4
pxor %xmm13, %xmm5
movdqu 64(%rdi), %xmm10
movdqu 80(%rdi), %xmm11
movdqu 96(%rdi), %xmm12
movdqu 112(%rdi), %xmm9
pxor %xmm10, %xmm6
pxor %xmm11, %xmm7
pxor %xmm12, %xmm8
movdqu %xmm1, (%rsi)
movdqu %xmm2, 16(%rsi)
movdqu %xmm3, 32(%rsi)
movdqu %xmm4, 48(%rsi)
movdqu %xmm5, 64(%rsi)
movdqu %xmm6, 80(%rsi)
movdqu %xmm7, 96(%rsi)
movdqu %xmm8, 112(%rsi)
addq $128, %rdi
decq %rcx
jne DLOOP_8
addq $128, %rsi
DREMAINDER_8:
cmpq $0, %r10
je DEND_8
DLOOP_8_2:
movdqu (%rdi), %xmm1
movdqa %xmm1, %xmm10
addq $16, %rdi
pxor (%r8), %xmm1
movdqu 160(%r8), %xmm2
cmpl $12, %r9d
aesdec 16(%r8), %xmm1
aesdec 32(%r8), %xmm1
aesdec 48(%r8), %xmm1
aesdec 64(%r8), %xmm1
aesdec 80(%r8), %xmm1
aesdec 96(%r8), %xmm1
aesdec 112(%r8), %xmm1
aesdec 128(%r8), %xmm1
aesdec 144(%r8), %xmm1
jb DLAST_8_2
movdqu 192(%r8), %xmm2
cmpl $14, %r9d
aesdec 160(%r8), %xmm1
aesdec 176(%r8), %xmm1
jb DLAST_8_2
movdqu 224(%r8), %xmm2
aesdec 192(%r8), %xmm1
aesdec 208(%r8), %xmm1
DLAST_8_2:
aesdeclast %xmm2, %xmm1
pxor %xmm9, %xmm1
movdqa %xmm10, %xmm9
movdqu %xmm1, (%rsi)
addq $16, %rsi
decq %r10
jne DLOOP_8_2
DEND_8:
ret
#endif /* WOLFSSL_AESNI_BYx */
/*
AES_ECB_encrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_ECB_encrypt_AESNI
AES_ECB_encrypt_AESNI:
#else
.globl _AES_ECB_encrypt_AESNI
_AES_ECB_encrypt_AESNI:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - length
# parameter 4: %rcx - KS
# parameter 5: %r8d - nr
movq %rdx, %r10
shrq $4, %rdx
shlq $60, %r10
je EECB_NO_PARTS_4
addq $1, %rdx
EECB_NO_PARTS_4:
movq %rdx, %r10
shlq $62, %r10
shrq $62, %r10
shrq $2, %rdx
je EECB_REMAINDER_4
subq $64, %rsi
EECB_LOOP_4:
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm2
movdqu 32(%rdi), %xmm3
movdqu 48(%rdi), %xmm4
movdqa (%rcx), %xmm9
movdqa 16(%rcx), %xmm10
movdqa 32(%rcx), %xmm11
movdqa 48(%rcx), %xmm12
pxor %xmm9, %xmm1
pxor %xmm9, %xmm2
pxor %xmm9, %xmm3
pxor %xmm9, %xmm4
aesenc %xmm10, %xmm1
aesenc %xmm10, %xmm2
aesenc %xmm10, %xmm3
aesenc %xmm10, %xmm4
aesenc %xmm11, %xmm1
aesenc %xmm11, %xmm2
aesenc %xmm11, %xmm3
aesenc %xmm11, %xmm4
aesenc %xmm12, %xmm1
aesenc %xmm12, %xmm2
aesenc %xmm12, %xmm3
aesenc %xmm12, %xmm4
movdqa 64(%rcx), %xmm9
movdqa 80(%rcx), %xmm10
movdqa 96(%rcx), %xmm11
movdqa 112(%rcx), %xmm12
aesenc %xmm9, %xmm1
aesenc %xmm9, %xmm2
aesenc %xmm9, %xmm3
aesenc %xmm9, %xmm4
aesenc %xmm10, %xmm1
aesenc %xmm10, %xmm2
aesenc %xmm10, %xmm3
aesenc %xmm10, %xmm4
aesenc %xmm11, %xmm1
aesenc %xmm11, %xmm2
aesenc %xmm11, %xmm3
aesenc %xmm11, %xmm4
aesenc %xmm12, %xmm1
aesenc %xmm12, %xmm2
aesenc %xmm12, %xmm3
aesenc %xmm12, %xmm4
movdqa 128(%rcx), %xmm9
movdqa 144(%rcx), %xmm10
movdqa 160(%rcx), %xmm11
cmpl $12, %r8d
aesenc %xmm9, %xmm1
aesenc %xmm9, %xmm2
aesenc %xmm9, %xmm3
aesenc %xmm9, %xmm4
aesenc %xmm10, %xmm1
aesenc %xmm10, %xmm2
aesenc %xmm10, %xmm3
aesenc %xmm10, %xmm4
jb EECB_LAST_4
movdqa 160(%rcx), %xmm9
movdqa 176(%rcx), %xmm10
movdqa 192(%rcx), %xmm11
cmpl $14, %r8d
aesenc %xmm9, %xmm1
aesenc %xmm9, %xmm2
aesenc %xmm9, %xmm3
aesenc %xmm9, %xmm4
aesenc %xmm10, %xmm1
aesenc %xmm10, %xmm2
aesenc %xmm10, %xmm3
aesenc %xmm10, %xmm4
jb EECB_LAST_4
movdqa 192(%rcx), %xmm9
movdqa 208(%rcx), %xmm10
movdqa 224(%rcx), %xmm11
aesenc %xmm9, %xmm1
aesenc %xmm9, %xmm2
aesenc %xmm9, %xmm3
aesenc %xmm9, %xmm4
aesenc %xmm10, %xmm1
aesenc %xmm10, %xmm2
aesenc %xmm10, %xmm3
aesenc %xmm10, %xmm4
EECB_LAST_4:
addq $64, %rdi
addq $64, %rsi
decq %rdx
aesenclast %xmm11, %xmm1
aesenclast %xmm11, %xmm2
aesenclast %xmm11, %xmm3
aesenclast %xmm11, %xmm4
movdqu %xmm1, (%rsi)
movdqu %xmm2, 16(%rsi)
movdqu %xmm3, 32(%rsi)
movdqu %xmm4, 48(%rsi)
jne EECB_LOOP_4
addq $64, %rsi
EECB_REMAINDER_4:
cmpq $0, %r10
je EECB_END_4
EECB_LOOP_4_2:
movdqu (%rdi), %xmm1
addq $16, %rdi
pxor (%rcx), %xmm1
movdqu 160(%rcx), %xmm2
aesenc 16(%rcx), %xmm1
aesenc 32(%rcx), %xmm1
aesenc 48(%rcx), %xmm1
aesenc 64(%rcx), %xmm1
aesenc 80(%rcx), %xmm1
aesenc 96(%rcx), %xmm1
aesenc 112(%rcx), %xmm1
aesenc 128(%rcx), %xmm1
aesenc 144(%rcx), %xmm1
cmpl $12, %r8d
jb EECB_LAST_4_2
movdqu 192(%rcx), %xmm2
aesenc 160(%rcx), %xmm1
aesenc 176(%rcx), %xmm1
cmpl $14, %r8d
jb EECB_LAST_4_2
movdqu 224(%rcx), %xmm2
aesenc 192(%rcx), %xmm1
aesenc 208(%rcx), %xmm1
EECB_LAST_4_2:
aesenclast %xmm2, %xmm1
movdqu %xmm1, (%rsi)
addq $16, %rsi
decq %r10
jne EECB_LOOP_4_2
EECB_END_4:
ret
/*
AES_ECB_decrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_ECB_decrypt_AESNI
AES_ECB_decrypt_AESNI:
#else
.globl _AES_ECB_decrypt_AESNI
_AES_ECB_decrypt_AESNI:
#endif
# parameter 1: %rdi - in
# parameter 2: %rsi - out
# parameter 3: %rdx - length
# parameter 4: %rcx - KS
# parameter 5: %r8d - nr
movq %rdx, %r10
shrq $4, %rdx
shlq $60, %r10
je DECB_NO_PARTS_4
addq $1, %rdx
DECB_NO_PARTS_4:
movq %rdx, %r10
shlq $62, %r10
shrq $62, %r10
shrq $2, %rdx
je DECB_REMAINDER_4
subq $64, %rsi
DECB_LOOP_4:
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm2
movdqu 32(%rdi), %xmm3
movdqu 48(%rdi), %xmm4
movdqa (%rcx), %xmm9
movdqa 16(%rcx), %xmm10
movdqa 32(%rcx), %xmm11
movdqa 48(%rcx), %xmm12
pxor %xmm9, %xmm1
pxor %xmm9, %xmm2
pxor %xmm9, %xmm3
pxor %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
movdqa 64(%rcx), %xmm9
movdqa 80(%rcx), %xmm10
movdqa 96(%rcx), %xmm11
movdqa 112(%rcx), %xmm12
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
aesdec %xmm11, %xmm1
aesdec %xmm11, %xmm2
aesdec %xmm11, %xmm3
aesdec %xmm11, %xmm4
aesdec %xmm12, %xmm1
aesdec %xmm12, %xmm2
aesdec %xmm12, %xmm3
aesdec %xmm12, %xmm4
movdqa 128(%rcx), %xmm9
movdqa 144(%rcx), %xmm10
movdqa 160(%rcx), %xmm11
cmpl $12, %r8d
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
jb DECB_LAST_4
movdqa 160(%rcx), %xmm9
movdqa 176(%rcx), %xmm10
movdqa 192(%rcx), %xmm11
cmpl $14, %r8d
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
jb DECB_LAST_4
movdqa 192(%rcx), %xmm9
movdqa 208(%rcx), %xmm10
movdqa 224(%rcx), %xmm11
aesdec %xmm9, %xmm1
aesdec %xmm9, %xmm2
aesdec %xmm9, %xmm3
aesdec %xmm9, %xmm4
aesdec %xmm10, %xmm1
aesdec %xmm10, %xmm2
aesdec %xmm10, %xmm3
aesdec %xmm10, %xmm4
DECB_LAST_4:
addq $64, %rdi
addq $64, %rsi
decq %rdx
aesdeclast %xmm11, %xmm1
aesdeclast %xmm11, %xmm2
aesdeclast %xmm11, %xmm3
aesdeclast %xmm11, %xmm4
movdqu %xmm1, (%rsi)
movdqu %xmm2, 16(%rsi)
movdqu %xmm3, 32(%rsi)
movdqu %xmm4, 48(%rsi)
jne DECB_LOOP_4
addq $64, %rsi
DECB_REMAINDER_4:
cmpq $0, %r10
je DECB_END_4
DECB_LOOP_4_2:
movdqu (%rdi), %xmm1
addq $16, %rdi
pxor (%rcx), %xmm1
movdqu 160(%rcx), %xmm2
cmpl $12, %r8d
aesdec 16(%rcx), %xmm1
aesdec 32(%rcx), %xmm1
aesdec 48(%rcx), %xmm1
aesdec 64(%rcx), %xmm1
aesdec 80(%rcx), %xmm1
aesdec 96(%rcx), %xmm1
aesdec 112(%rcx), %xmm1
aesdec 128(%rcx), %xmm1
aesdec 144(%rcx), %xmm1
jb DECB_LAST_4_2
cmpl $14, %r8d
movdqu 192(%rcx), %xmm2
aesdec 160(%rcx), %xmm1
aesdec 176(%rcx), %xmm1
jb DECB_LAST_4_2
movdqu 224(%rcx), %xmm2
aesdec 192(%rcx), %xmm1
aesdec 208(%rcx), %xmm1
DECB_LAST_4_2:
aesdeclast %xmm2, %xmm1
movdqu %xmm1, (%rsi)
addq $16, %rsi
decq %r10
jne DECB_LOOP_4_2
DECB_END_4:
ret
/*
void AES_128_Key_Expansion_AESNI(const unsigned char* userkey,
unsigned char* key_schedule);
*/
#ifndef __APPLE__
.globl AES_128_Key_Expansion_AESNI
.align 16,0x90
AES_128_Key_Expansion_AESNI:
#else
.globl _AES_128_Key_Expansion_AESNI
.p2align 4
_AES_128_Key_Expansion_AESNI:
#endif
# parameter 1: %rdi - userkey
# parameter 2: %rsi - key_schedule
movdqu (%rdi), %xmm1
movdqa %xmm1, (%rsi)
ASSISTS:
aeskeygenassist $1, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 16(%rsi)
aeskeygenassist $2, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 32(%rsi)
aeskeygenassist $4, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 48(%rsi)
aeskeygenassist $8, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 64(%rsi)
aeskeygenassist $16, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 80(%rsi)
aeskeygenassist $32, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 96(%rsi)
aeskeygenassist $64, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 112(%rsi)
aeskeygenassist $0x80, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 128(%rsi)
aeskeygenassist $0x1b, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 144(%rsi)
aeskeygenassist $0x36, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 160(%rsi)
ret
PREPARE_ROUNDKEY_128:
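# aeskeygenassist left RotWord(SubWord(w3)) ^ rcon in dword 3 of %xmm2;
# pshufd broadcasts it, the pslldq/pxor ladder forms the prefix-xor of the
# previous round key's words, and the final pxor yields the new round key.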
pshufd $255, %xmm2, %xmm2
movdqa %xmm1, %xmm3
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
ret
/*
void AES_192_Key_Expansion_AESNI (const unsigned char *userkey,
unsigned char *key)
*/
#ifndef __APPLE__
.globl AES_192_Key_Expansion_AESNI
AES_192_Key_Expansion_AESNI:
#else
.globl _AES_192_Key_Expansion_AESNI
_AES_192_Key_Expansion_AESNI:
#endif
# parameter 1: %rdi - userkey
# parameter 2: %rsi - key
movdqu (%rdi), %xmm1
movq 16(%rdi), %xmm3
movdqa %xmm1, (%rsi)
movdqa %xmm3, %xmm5
aeskeygenassist $0x1, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 16(%rsi)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 32(%rsi)
aeskeygenassist $0x2, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 48(%rsi)
movdqa %xmm3, %xmm5
aeskeygenassist $0x4, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 64(%rsi)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 80(%rsi)
aeskeygenassist $0x8, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 96(%rsi)
movdqa %xmm3, %xmm5
aeskeygenassist $0x10, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 112(%rsi)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 128(%rsi)
aeskeygenassist $0x20, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 144(%rsi)
movdqa %xmm3, %xmm5
aeskeygenassist $0x40, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 160(%rsi)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 176(%rsi)
aeskeygenassist $0x80, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 192(%rsi)
movdqa %xmm3, 208(%rsi)
ret
PREPARE_ROUNDKEY_192:
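# 192-bit schedule step: pshufd $0x55 broadcasts RotWord(SubWord(.)) ^ rcon
# from dword 1, %xmm1 is updated as in the 128-bit case, then the freshly
# computed last word of %xmm1 is folded into the 64-bit tail held in %xmm3.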
pshufd $0x55, %xmm2, %xmm2
movdqu %xmm1, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
pshufd $0xff, %xmm1, %xmm2
movdqu %xmm3, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
ret
/*
void AES_256_Key_Expansion_AESNI (const unsigned char *userkey,
unsigned char *key)
*/
#ifndef __APPLE__
.globl AES_256_Key_Expansion_AESNI
AES_256_Key_Expansion_AESNI:
#else
.globl _AES_256_Key_Expansion_AESNI
_AES_256_Key_Expansion_AESNI:
#endif
# parameter 1: %rdi - userkey
# parameter 2: %rsi - key
movdqu (%rdi), %xmm1
movdqu 16(%rdi), %xmm3
movdqa %xmm1, (%rsi)
movdqa %xmm3, 16(%rsi)
aeskeygenassist $0x1, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 32(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 48(%rsi)
aeskeygenassist $0x2, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 64(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 80(%rsi)
aeskeygenassist $0x4, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 96(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 112(%rsi)
aeskeygenassist $0x8, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 128(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 144(%rsi)
aeskeygenassist $0x10, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 160(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 176(%rsi)
aeskeygenassist $0x20, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 192(%rsi)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 208(%rsi)
aeskeygenassist $0x40, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 224(%rsi)
ret
MAKE_RK256_a:
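# Even AES-256 round keys: pshufd $0xff broadcasts
# RotWord(SubWord(w7)) ^ rcon, then the usual prefix-xor fold into %xmm1.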
pshufd $0xff, %xmm2, %xmm2
movdqa %xmm1, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
ret
MAKE_RK256_b:
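# Odd AES-256 round keys: aeskeygenassist was called with rcon 0, and
# pshufd $0xaa broadcasts the plain SubWord result (dword 2) instead.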
pshufd $0xaa, %xmm2, %xmm2
movdqa %xmm3, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
ret
#elif defined WOLFSSL_X86_BUILD
/*
AES_CBC_encrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_CBC_encrypt_AESNI
AES_CBC_encrypt_AESNI:
#else
.globl _AES_CBC_encrypt_AESNI
_AES_CBC_encrypt_AESNI:
#endif
# parameter 1: stack[4] => %edi
# parameter 2: stack[8] => %esi
# parameter 3: stack[12] => %edx
# parameter 4: stack[16] => %ecx
# parameter 5: stack[20] => %eax
# parameter 6: stack[24] => %ebx
push %edi
push %esi
push %ebx
push %ebp
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
movl 32(%esp), %ecx
movl 36(%esp), %eax
movl 40(%esp), %ebx
movl %ecx, %ebp
shrl $4, %ecx
shll $60, %ebp
je NO_PARTS
addl $1, %ecx
NO_PARTS:
subl $16, %esi
movdqa (%edx), %xmm1
LOOP:
pxor (%edi), %xmm1
pxor (%eax), %xmm1
addl $16,%esi
addl $16,%edi
cmpl $12, %ebx
aesenc 16(%eax),%xmm1
aesenc 32(%eax),%xmm1
aesenc 48(%eax),%xmm1
aesenc 64(%eax),%xmm1
aesenc 80(%eax),%xmm1
aesenc 96(%eax),%xmm1
aesenc 112(%eax),%xmm1
aesenc 128(%eax),%xmm1
aesenc 144(%eax),%xmm1
movdqa 160(%eax),%xmm2
jb LAST
cmpl $14, %ebx
aesenc 160(%eax),%xmm1
aesenc 176(%eax),%xmm1
movdqa 192(%eax),%xmm2
jb LAST
aesenc 192(%eax),%xmm1
aesenc 208(%eax),%xmm1
movdqa 224(%eax),%xmm2
LAST:
decl %ecx
aesenclast %xmm2,%xmm1
movdqu %xmm1,(%esi)
jne LOOP
pop %ebp
pop %ebx
pop %esi
pop %edi
ret
/*
AES_CBC_decrypt_AESNI_by4 (const unsigned char *in,
unsigned char *out,
unsigned char ivec[16],
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_CBC_decrypt_AESNI_by4
AES_CBC_decrypt_AESNI_by4:
#else
.globl _AES_CBC_decrypt_AESNI_by4
_AES_CBC_decrypt_AESNI_by4:
#endif
# parameter 1: stack[4] => %edi
# parameter 2: stack[8] => %esi
# parameter 3: stack[12] => %edx
# parameter 4: stack[16] => %ecx
# parameter 5: stack[20] => %eax
# parameter 6: stack[24] => %ebx
push %edi
push %esi
push %ebx
push %ebp
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
movl 32(%esp), %ecx
movl 36(%esp), %eax
movl 40(%esp), %ebx
subl $16, %esp
movdqu (%edx), %xmm0
movl %ecx, %ebp
shrl $4, %ecx
shll $60, %ebp
movdqu %xmm0, (%esp)
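# only %xmm0-%xmm7 exist in 32-bit mode, so the IV / next-IV block is
# kept in a 16-byte stack slot rather than a spare register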
je DNO_PARTS_4
addl $1, %ecx
DNO_PARTS_4:
movl %ecx, %ebp
shll $62, %ebp
shrl $62, %ebp
shrl $2, %ecx
je DREMAINDER_4
subl $64, %esi
DLOOP_4:
movdqu (%edi), %xmm1
movdqu 16(%edi), %xmm2
movdqu 32(%edi), %xmm3
movdqu 48(%edi), %xmm4
movdqa (%eax), %xmm5
movdqa 16(%eax), %xmm6
movdqa 32(%eax), %xmm7
movdqa 48(%eax), %xmm0
pxor %xmm5, %xmm1
pxor %xmm5, %xmm2
pxor %xmm5, %xmm3
pxor %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
aesdec %xmm7, %xmm1
aesdec %xmm7, %xmm2
aesdec %xmm7, %xmm3
aesdec %xmm7, %xmm4
aesdec %xmm0, %xmm1
aesdec %xmm0, %xmm2
aesdec %xmm0, %xmm3
aesdec %xmm0, %xmm4
movdqa 64(%eax), %xmm5
movdqa 80(%eax), %xmm6
movdqa 96(%eax), %xmm7
movdqa 112(%eax), %xmm0
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
aesdec %xmm7, %xmm1
aesdec %xmm7, %xmm2
aesdec %xmm7, %xmm3
aesdec %xmm7, %xmm4
aesdec %xmm0, %xmm1
aesdec %xmm0, %xmm2
aesdec %xmm0, %xmm3
aesdec %xmm0, %xmm4
movdqa 128(%eax), %xmm5
movdqa 144(%eax), %xmm6
movdqa 160(%eax), %xmm7
cmpl $12, %ebx
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
jb DLAST_4
movdqa 160(%eax), %xmm5
movdqa 176(%eax), %xmm6
movdqa 192(%eax), %xmm7
cmpl $14, %ebx
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
jb DLAST_4
movdqa 192(%eax), %xmm5
movdqa 208(%eax), %xmm6
movdqa 224(%eax), %xmm7
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
DLAST_4:
addl $64, %esi
aesdeclast %xmm7, %xmm1
aesdeclast %xmm7, %xmm2
aesdeclast %xmm7, %xmm3
aesdeclast %xmm7, %xmm4
movdqu (%esp), %xmm0
movdqu (%edi), %xmm5
movdqu 16(%edi), %xmm6
movdqu 32(%edi), %xmm7
pxor %xmm0, %xmm1
pxor %xmm5, %xmm2
pxor %xmm6, %xmm3
pxor %xmm7, %xmm4
movdqu 48(%edi), %xmm0
movdqu %xmm1, (%esi)
movdqu %xmm2, 16(%esi)
movdqu %xmm3, 32(%esi)
movdqu %xmm4, 48(%esi)
movdqu %xmm0, (%esp)
addl $64, %edi
decl %ecx
jne DLOOP_4
addl $64, %esi
DREMAINDER_4:
cmpl $0, %ebp
je DEND_4
DLOOP_4_2:
movdqu (%edi), %xmm1
movdqa %xmm1, %xmm5
addl $16, %edi
pxor (%eax), %xmm1
movdqu 160(%eax), %xmm2
cmpl $12, %ebx
aesdec 16(%eax), %xmm1
aesdec 32(%eax), %xmm1
aesdec 48(%eax), %xmm1
aesdec 64(%eax), %xmm1
aesdec 80(%eax), %xmm1
aesdec 96(%eax), %xmm1
aesdec 112(%eax), %xmm1
aesdec 128(%eax), %xmm1
aesdec 144(%eax), %xmm1
jb DLAST_4_2
movdqu 192(%eax), %xmm2
cmpl $14, %ebx
aesdec 160(%eax), %xmm1
aesdec 176(%eax), %xmm1
jb DLAST_4_2
movdqu 224(%eax), %xmm2
aesdec 192(%eax), %xmm1
aesdec 208(%eax), %xmm1
DLAST_4_2:
aesdeclast %xmm2, %xmm1
pxor %xmm0, %xmm1
movdqa %xmm5, %xmm0
movdqu %xmm1, (%esi)
addl $16, %esi
decl %ebp
jne DLOOP_4_2
DEND_4:
addl $16, %esp
pop %ebp
pop %ebx
pop %esi
pop %edi
ret
/*
AES_ECB_encrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_ECB_encrypt_AESNI
AES_ECB_encrypt_AESNI:
#else
.globl _AES_ECB_encrypt_AESNI
_AES_ECB_encrypt_AESNI:
#endif
# parameter 1: stack[4] => %edi
# parameter 2: stack[8] => %esi
# parameter 3: stack[12] => %edx
# parameter 4: stack[16] => %ecx
# parameter 5: stack[20] => %eax
push %edi
push %esi
push %ebx
movl 16(%esp), %edi
movl 20(%esp), %esi
movl 24(%esp), %edx
movl 28(%esp), %ecx
movl 32(%esp), %eax
movl %edx, %ebx
shrl $4, %edx
shll $60, %ebx
je EECB_NO_PARTS_4
addl $1, %edx
EECB_NO_PARTS_4:
movl %edx, %ebx
shll $62, %ebx
shrl $62, %ebx
shrl $2, %edx
je EECB_REMAINDER_4
subl $64, %esi
EECB_LOOP_4:
movdqu (%edi), %xmm1
movdqu 16(%edi), %xmm2
movdqu 32(%edi), %xmm3
movdqu 48(%edi), %xmm4
movdqa (%ecx), %xmm5
movdqa 16(%ecx), %xmm6
movdqa 32(%ecx), %xmm7
movdqa 48(%ecx), %xmm0
pxor %xmm5, %xmm1
pxor %xmm5, %xmm2
pxor %xmm5, %xmm3
pxor %xmm5, %xmm4
aesenc %xmm6, %xmm1
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm3
aesenc %xmm6, %xmm4
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
aesenc %xmm7, %xmm4
aesenc %xmm0, %xmm1
aesenc %xmm0, %xmm2
aesenc %xmm0, %xmm3
aesenc %xmm0, %xmm4
movdqa 64(%ecx), %xmm5
movdqa 80(%ecx), %xmm6
movdqa 96(%ecx), %xmm7
movdqa 112(%ecx), %xmm0
aesenc %xmm5, %xmm1
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm3
aesenc %xmm5, %xmm4
aesenc %xmm6, %xmm1
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm3
aesenc %xmm6, %xmm4
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
aesenc %xmm7, %xmm4
aesenc %xmm0, %xmm1
aesenc %xmm0, %xmm2
aesenc %xmm0, %xmm3
aesenc %xmm0, %xmm4
movdqa 128(%ecx), %xmm5
movdqa 144(%ecx), %xmm6
movdqa 160(%ecx), %xmm7
cmpl $12, %eax
aesenc %xmm5, %xmm1
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm3
aesenc %xmm5, %xmm4
aesenc %xmm6, %xmm1
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm3
aesenc %xmm6, %xmm4
jb EECB_LAST_4
movdqa 160(%ecx), %xmm5
movdqa 176(%ecx), %xmm6
movdqa 192(%ecx), %xmm7
cmpl $14, %eax
aesenc %xmm5, %xmm1
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm3
aesenc %xmm5, %xmm4
aesenc %xmm6, %xmm1
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm3
aesenc %xmm6, %xmm4
jb EECB_LAST_4
movdqa 192(%ecx), %xmm5
movdqa 208(%ecx), %xmm6
movdqa 224(%ecx), %xmm7
aesenc %xmm5, %xmm1
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm3
aesenc %xmm5, %xmm4
aesenc %xmm6, %xmm1
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm3
aesenc %xmm6, %xmm4
EECB_LAST_4:
addl $64, %edi
addl $64, %esi
decl %edx
aesenclast %xmm7, %xmm1
aesenclast %xmm7, %xmm2
aesenclast %xmm7, %xmm3
aesenclast %xmm7, %xmm4
movdqu %xmm1, (%esi)
movdqu %xmm2, 16(%esi)
movdqu %xmm3, 32(%esi)
movdqu %xmm4, 48(%esi)
jne EECB_LOOP_4
addl $64, %esi
EECB_REMAINDER_4:
cmpl $0, %ebx
je EECB_END_4
EECB_LOOP_4_2:
movdqu (%edi), %xmm1
addl $16, %edi
pxor (%ecx), %xmm1
movdqu 160(%ecx), %xmm2
aesenc 16(%ecx), %xmm1
aesenc 32(%ecx), %xmm1
aesenc 48(%ecx), %xmm1
aesenc 64(%ecx), %xmm1
aesenc 80(%ecx), %xmm1
aesenc 96(%ecx), %xmm1
aesenc 112(%ecx), %xmm1
aesenc 128(%ecx), %xmm1
aesenc 144(%ecx), %xmm1
cmpl $12, %eax
jb EECB_LAST_4_2
movdqu 192(%ecx), %xmm2
aesenc 160(%ecx), %xmm1
aesenc 176(%ecx), %xmm1
cmpl $14, %eax
jb EECB_LAST_4_2
movdqu 224(%ecx), %xmm2
aesenc 192(%ecx), %xmm1
aesenc 208(%ecx), %xmm1
EECB_LAST_4_2:
aesenclast %xmm2, %xmm1
movdqu %xmm1, (%esi)
addl $16, %esi
decl %ebx
jne EECB_LOOP_4_2
EECB_END_4:
pop %ebx
pop %esi
pop %edi
ret
/*
AES_ECB_decrypt_AESNI (const unsigned char *in,
unsigned char *out,
unsigned long length,
const unsigned char *KS,
int nr)
*/
#ifndef __APPLE__
.globl AES_ECB_decrypt_AESNI
AES_ECB_decrypt_AESNI:
#else
.globl _AES_ECB_decrypt_AESNI
_AES_ECB_decrypt_AESNI:
#endif
# parameter 1: stack[4] => %edi
# parameter 2: stack[8] => %esi
# parameter 3: stack[12] => %edx
# parameter 4: stack[16] => %ecx
# parameter 5: stack[20] => %eax
push %edi
push %esi
push %ebx
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
movl 32(%esp), %ecx
movl 36(%esp), %eax
movl %edx, %ebx
shrl $4, %edx
shll $60, %ebx
je DECB_NO_PARTS_4
addl $1, %edx
DECB_NO_PARTS_4:
movl %edx, %ebx
shll $62, %ebx
shrl $62, %ebx
shrl $2, %edx
je DECB_REMAINDER_4
subl $64, %esi
DECB_LOOP_4:
movdqu (%edi), %xmm1
movdqu 16(%edi), %xmm2
movdqu 32(%edi), %xmm3
movdqu 48(%edi), %xmm4
movdqa (%ecx), %xmm5
movdqa 16(%ecx), %xmm6
movdqa 32(%ecx), %xmm7
movdqa 48(%ecx), %xmm0
pxor %xmm5, %xmm1
pxor %xmm5, %xmm2
pxor %xmm5, %xmm3
pxor %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
aesdec %xmm7, %xmm1
aesdec %xmm7, %xmm2
aesdec %xmm7, %xmm3
aesdec %xmm7, %xmm4
aesdec %xmm0, %xmm1
aesdec %xmm0, %xmm2
aesdec %xmm0, %xmm3
aesdec %xmm0, %xmm4
movdqa 64(%ecx), %xmm5
movdqa 80(%ecx), %xmm6
movdqa 96(%ecx), %xmm7
movdqa 112(%ecx), %xmm0
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
aesdec %xmm7, %xmm1
aesdec %xmm7, %xmm2
aesdec %xmm7, %xmm3
aesdec %xmm7, %xmm4
aesdec %xmm0, %xmm1
aesdec %xmm0, %xmm2
aesdec %xmm0, %xmm3
aesdec %xmm0, %xmm4
movdqa 128(%ecx), %xmm5
movdqa 144(%ecx), %xmm6
movdqa 160(%ecx), %xmm7
cmpl $12, %eax
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
jb DECB_LAST_4
movdqa 160(%ecx), %xmm5
movdqa 176(%ecx), %xmm6
movdqa 192(%ecx), %xmm7
cmpl $14, %eax
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
jb DECB_LAST_4
movdqa 192(%ecx), %xmm5
movdqa 208(%ecx), %xmm6
movdqa 224(%ecx), %xmm7
aesdec %xmm5, %xmm1
aesdec %xmm5, %xmm2
aesdec %xmm5, %xmm3
aesdec %xmm5, %xmm4
aesdec %xmm6, %xmm1
aesdec %xmm6, %xmm2
aesdec %xmm6, %xmm3
aesdec %xmm6, %xmm4
DECB_LAST_4:
addl $64, %edi
addl $64, %esi
decl %edx
aesdeclast %xmm7, %xmm1
aesdeclast %xmm7, %xmm2
aesdeclast %xmm7, %xmm3
aesdeclast %xmm7, %xmm4
movdqu %xmm1, (%esi)
movdqu %xmm2, 16(%esi)
movdqu %xmm3, 32(%esi)
movdqu %xmm4, 48(%esi)
jne DECB_LOOP_4
addl $64, %esi
DECB_REMAINDER_4:
cmpl $0, %ebx
je DECB_END_4
DECB_LOOP_4_2:
movdqu (%edi), %xmm1
addl $16, %edi
pxor (%ecx), %xmm1
movdqu 160(%ecx), %xmm2
cmpl $12, %eax
aesdec 16(%ecx), %xmm1
aesdec 32(%ecx), %xmm1
aesdec 48(%ecx), %xmm1
aesdec 64(%ecx), %xmm1
aesdec 80(%ecx), %xmm1
aesdec 96(%ecx), %xmm1
aesdec 112(%ecx), %xmm1
aesdec 128(%ecx), %xmm1
aesdec 144(%ecx), %xmm1
jb DECB_LAST_4_2
cmpl $14, %eax
movdqu 192(%ecx), %xmm2
aesdec 160(%ecx), %xmm1
aesdec 176(%ecx), %xmm1
jb DECB_LAST_4_2
movdqu 224(%ecx), %xmm2
aesdec 192(%ecx), %xmm1
aesdec 208(%ecx), %xmm1
DECB_LAST_4_2:
aesdeclast %xmm2, %xmm1
movdqu %xmm1, (%esi)
addl $16, %esi
decl %ebx
jne DECB_LOOP_4_2
DECB_END_4:
pop %ebx
pop %esi
pop %edi
ret
/*
void AES_128_Key_Expansion_AESNI(const unsigned char* userkey,
unsigned char* key_schedule);
*/
#ifndef __APPLE__
.globl AES_128_Key_Expansion_AESNI
.align 16,0x90
AES_128_Key_Expansion_AESNI:
#else
.globl _AES_128_Key_Expansion_AESNI
.p2align 4
_AES_128_Key_Expansion_AESNI:
#endif
# parameter 1: stack[4] => %eax
# parameter 2: stack[8] => %edx
movl 4(%esp), %eax
movl 8(%esp), %edx
movl $10, 240(%edx)
movdqu (%eax), %xmm1
movdqa %xmm1, (%edx)
ASSISTS:
aeskeygenassist $1, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 16(%edx)
aeskeygenassist $2, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 32(%edx)
aeskeygenassist $4, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 48(%edx)
aeskeygenassist $8, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 64(%edx)
aeskeygenassist $16, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 80(%edx)
aeskeygenassist $32, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 96(%edx)
aeskeygenassist $64, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 112(%edx)
aeskeygenassist $0x80, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 128(%edx)
aeskeygenassist $0x1b, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 144(%edx)
aeskeygenassist $0x36, %xmm1, %xmm2
call PREPARE_ROUNDKEY_128
movdqa %xmm1, 160(%edx)
ret
PREPARE_ROUNDKEY_128:
pshufd $255, %xmm2, %xmm2
movdqa %xmm1, %xmm3
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pslldq $4, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
ret
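/*
 * PREPARE_ROUNDKEY_128 turns the aeskeygenassist result into the next
 * round key, following the standard Intel AES-NI expansion pattern:
 * pshufd $255 broadcasts the SubWord(RotWord(w3))^Rcon lane to all four
 * words, and the three pslldq/pxor steps compute the running XOR of the
 * previous round key's words before the final mix with %xmm2.
 */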
/*
void AES_192_Key_Expansion_AESNI (const unsigned char *userkey,
unsigned char *key)
*/
#ifndef __APPLE__
.globl AES_192_Key_Expansion_AESNI
AES_192_Key_Expansion_AESNI:
#else
.globl _AES_192_Key_Expansion_AESNI
_AES_192_Key_Expansion_AESNI:
#endif
# parameter 1: stack[4] => %eax
# parameter 2: stack[8] => %edx
movl 4(%esp), %eax
movl 8(%esp), %edx
movdqu (%eax), %xmm1
movq 16(%eax), %xmm3
movdqa %xmm1, (%edx)
movdqa %xmm3, %xmm5
aeskeygenassist $0x1, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 16(%edx)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 32(%edx)
aeskeygenassist $0x2, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 48(%edx)
movdqa %xmm3, %xmm5
aeskeygenassist $0x4, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 64(%edx)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 80(%edx)
aeskeygenassist $0x8, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 96(%edx)
movdqa %xmm3, %xmm5
aeskeygenassist $0x10, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 112(%edx)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 128(%edx)
aeskeygenassist $0x20, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 144(%edx)
movdqa %xmm3, %xmm5
aeskeygenassist $0x40, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
shufpd $0, %xmm1, %xmm5
movdqa %xmm5, 160(%edx)
movdqa %xmm1, %xmm6
shufpd $1, %xmm3, %xmm6
movdqa %xmm6, 176(%edx)
aeskeygenassist $0x80, %xmm3, %xmm2
call PREPARE_ROUNDKEY_192
movdqa %xmm1, 192(%edx)
movdqa %xmm3, 208(%edx)
ret
PREPARE_ROUNDKEY_192:
pshufd $0x55, %xmm2, %xmm2
movdqu %xmm1, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
pshufd $0xff, %xmm1, %xmm2
movdqu %xmm3, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
ret
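/*
 * AES-192 expansion produces six key words per aeskeygenassist step,
 * i.e. one and a half 16-byte round keys, which is why the caller packs
 * halves of %xmm1/%xmm3 into consecutive schedule slots with shufpd.
 * PREPARE_ROUNDKEY_192 extends the prefix-XOR trick of the 128-bit
 * variant to also update the two extra key words held in %xmm3.
 */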
/*
void AES_256_Key_Expansion_AESNI (const unsigned char *userkey,
unsigned char *key)
*/
#ifndef __APPLE__
.globl AES_256_Key_Expansion_AESNI
AES_256_Key_Expansion_AESNI:
#else
.globl _AES_256_Key_Expansion_AESNI
_AES_256_Key_Expansion_AESNI:
#endif
# parameter 1: stack[4] => %eax
# parameter 2: stack[8] => %edx
movl 4(%esp), %eax
movl 8(%esp), %edx
movdqu (%eax), %xmm1
movdqu 16(%eax), %xmm3
movdqa %xmm1, (%edx)
movdqa %xmm3, 16(%edx)
aeskeygenassist $0x1, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 32(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 48(%edx)
aeskeygenassist $0x2, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 64(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 80(%edx)
aeskeygenassist $0x4, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 96(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 112(%edx)
aeskeygenassist $0x8, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 128(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 144(%edx)
aeskeygenassist $0x10, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 160(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 176(%edx)
aeskeygenassist $0x20, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 192(%edx)
aeskeygenassist $0x0, %xmm1, %xmm2
call MAKE_RK256_b
movdqa %xmm3, 208(%edx)
aeskeygenassist $0x40, %xmm3, %xmm2
call MAKE_RK256_a
movdqa %xmm1, 224(%edx)
ret
MAKE_RK256_a:
pshufd $0xff, %xmm2, %xmm2
movdqa %xmm1, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pslldq $4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
ret
MAKE_RK256_b:
pshufd $0xaa, %xmm2, %xmm2
movdqa %xmm3, %xmm4
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pslldq $4, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
ret
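/*
 * AES-256 alternates two helpers: MAKE_RK256_a consumes the Rcon'd
 * RotWord/SubWord lane (pshufd $0xff) to build the even round keys in
 * %xmm1, while MAKE_RK256_b is fed aeskeygenassist with rcon 0 and
 * picks the plain SubWord lane (pshufd $0xaa) for the odd round keys
 * kept in %xmm3.
 */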
#endif /* WOLFSSL_X86_64_BUILD */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartAdvComIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
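;; All handlers below are declared PUBWEAK, so an application can
;; override any of them simply by defining a function with the same
;; name; each default implementation is an infinite branch-to-self.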
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 379,511
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/fe_x25519_asm.S
|
/* fe_x25519_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifndef __APPLE__
.text
.globl fe_init
.type fe_init,@function
.align 16
fe_init:
#else
.section __TEXT,__text
.globl _fe_init
.p2align 4
_fe_init:
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
movq cpuFlagsSet@GOTPCREL(%rip), %rax
movl (%rax), %eax
#else
movl _cpuFlagsSet(%rip), %eax
#endif /* __APPLE__ */
testl %eax, %eax
je L_fe_init_get_flags
repz retq
L_fe_init_get_flags:
#ifndef __APPLE__
callq cpuid_get_flags@plt
#else
callq _cpuid_get_flags
#endif /* __APPLE__ */
#ifndef __APPLE__
movq intelFlags@GOTPCREL(%rip), %rdx
movl %eax, (%rdx)
#else
movl %eax, _intelFlags(%rip)
#endif /* __APPLE__ */
andl $0x50, %eax
cmpl $0x50, %eax
jne L_fe_init_flags_done
#ifndef __APPLE__
movq fe_mul_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_mul_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_mul_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_mul_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_sq_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_sq_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_sq_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_sq_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_mul121666_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_mul121666_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_mul121666_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_mul121666_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_invert_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_invert_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_invert_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_invert_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq curve25519_avx2@GOTPCREL(%rip), %rax
#else
leaq _curve25519_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq curve25519_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _curve25519_p(%rip)
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
#ifndef __APPLE__
movq fe_sq2_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_sq2_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_sq2_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_sq2_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_pow22523_avx2@GOTPCREL(%rip), %rax
#else
leaq _fe_pow22523_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq fe_pow22523_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _fe_pow22523_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p1p1_to_p2_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_p1p1_to_p2_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p1p1_to_p2_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_p1p1_to_p2_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p1p1_to_p3_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_p1p1_to_p3_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p1p1_to_p3_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_p1p1_to_p3_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p2_dbl_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_p2_dbl_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_p2_dbl_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_p2_dbl_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_madd_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_madd_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_madd_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_madd_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_msub_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_msub_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_msub_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_msub_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_add_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_add_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_add_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_add_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_sub_avx2@GOTPCREL(%rip), %rax
#else
leaq _ge_sub_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq ge_sub_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _ge_sub_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq sc_reduce_avx2@GOTPCREL(%rip), %rax
#else
leaq _sc_reduce_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq sc_reduce_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _sc_reduce_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
movq sc_muladd_avx2@GOTPCREL(%rip), %rax
#else
leaq _sc_muladd_avx2(%rip), %rax
#endif /* __APPLE__ */
#ifndef __APPLE__
movq sc_muladd_p@GOTPCREL(%rip), %rdx
movq %rax, (%rdx)
#else
movq %rax, _sc_muladd_p(%rip)
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
L_fe_init_flags_done:
#ifndef __APPLE__
movq cpuFlagsSet@GOTPCREL(%rip), %rdx
movl $0x1, (%rdx)
#else
movl $0x1, _cpuFlagsSet(%rip)
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
repz retq
#ifndef __APPLE__
.size fe_init,.-fe_init
#endif /* __APPLE__ */
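/*
 * Runtime dispatch: on first call, fe_init reads the CPUID feature
 * flags once (guarded by cpuFlagsSet) and, when the required AVX
 * feature bits are present (the code checks mask 0x50 of intelFlags),
 * repoints the fe_mul_p/fe_sq_p/... function pointers from the default
 * *_x64 routines (see the .data definitions further down) to the
 * *_avx2 implementations.  Public entry points such as fe_mul are then
 * a single indirect jmpq through the pointer; in pseudo-C (a sketch,
 * not the wolfSSL C API):
 *
 *   static void (*fe_mul_p)(fe r, const fe a, const fe b) = fe_mul_x64;
 *   void fe_mul(fe r, const fe a, const fe b) { fe_mul_p(r, a, b); }
 */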
#ifndef __APPLE__
.text
.globl fe_frombytes
.type fe_frombytes,@function
.align 16
fe_frombytes:
#else
.section __TEXT,__text
.globl _fe_frombytes
.p2align 4
_fe_frombytes:
#endif /* __APPLE__ */
movq $0x7fffffffffffffff, %r9
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
andq %r9, %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_frombytes,.-fe_frombytes
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_tobytes
.type fe_tobytes,@function
.align 16
fe_tobytes:
#else
.section __TEXT,__text
.globl _fe_tobytes
.p2align 4
_fe_tobytes:
#endif /* __APPLE__ */
movq $0x7fffffffffffffff, %r10
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
addq $19, %rdx
adcq $0x00, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
shrq $63, %r8
imulq $19, %r8, %r9
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
addq %r9, %rdx
adcq $0x00, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
andq %r10, %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_tobytes,.-fe_tobytes
#endif /* __APPLE__ */
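/*
 * Canonicalization trick used by fe_tobytes (and by fe_isnonzero /
 * fe_isnegative below): with p = 2^255 - 19, trially adding 19 exposes
 * bit 255 exactly when the input is >= p; that bit, scaled by 19, is
 * added back to the original value and bit 255 is masked off, i.e.
 * r = (a + 19*((a + 19) >> 255)) mod 2^255, which yields the unique
 * representative in [0, p) for any partially reduced input a < 2*p.
 */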
#ifndef __APPLE__
.text
.globl fe_1
.type fe_1,@function
.align 16
fe_1:
#else
.section __TEXT,__text
.globl _fe_1
.p2align 4
_fe_1:
#endif /* __APPLE__ */
# Set one
movq $0x01, (%rdi)
movq $0x00, 8(%rdi)
movq $0x00, 16(%rdi)
movq $0x00, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_1,.-fe_1
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_0
.type fe_0,@function
.align 16
fe_0:
#else
.section __TEXT,__text
.globl _fe_0
.p2align 4
_fe_0:
#endif /* __APPLE__ */
# Set zero
movq $0x00, (%rdi)
movq $0x00, 8(%rdi)
movq $0x00, 16(%rdi)
movq $0x00, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_0,.-fe_0
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_copy
.type fe_copy,@function
.align 16
fe_copy:
#else
.section __TEXT,__text
.globl _fe_copy
.p2align 4
_fe_copy:
#endif /* __APPLE__ */
# Copy
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_copy,.-fe_copy
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_sub
.type fe_sub,@function
.align 16
fe_sub:
#else
.section __TEXT,__text
.globl _fe_sub
.p2align 4
_fe_sub:
#endif /* __APPLE__ */
pushq %r12
# Sub
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
subq (%rdx), %rax
sbbq 8(%rdx), %rcx
sbbq 16(%rdx), %r8
sbbq 24(%rdx), %r9
sbbq %r11, %r11
shldq $0x01, %r9, %r11
movq $0x7fffffffffffffff, %r12
imulq $-19, %r11
andq %r12, %r9
# Add modulus (if underflow)
subq %r11, %rax
sbbq $0x00, %rcx
sbbq $0x00, %r8
sbbq $0x00, %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size fe_sub,.-fe_sub
#endif /* __APPLE__ */
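/*
 * fe_sub and fe_add return partially reduced values rather than fully
 * canonical ones: the borrow (or carry) out of bit 255 is folded back
 * as a multiple of 19, using 2^255 = 19 (mod p), so the result stays
 * within four 64-bit limbs.  Canonical form is produced by fe_tobytes.
 */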
#ifndef __APPLE__
.text
.globl fe_add
.type fe_add,@function
.align 16
fe_add:
#else
.section __TEXT,__text
.globl _fe_add
.p2align 4
_fe_add:
#endif /* __APPLE__ */
pushq %r12
# Add
movq (%rsi), %rax
movq 8(%rsi), %rcx
addq (%rdx), %rax
movq 16(%rsi), %r8
adcq 8(%rdx), %rcx
movq 24(%rsi), %r9
adcq 16(%rdx), %r8
adcq 24(%rdx), %r9
movq $0x00, %r11
adcq $0x00, %r11
shldq $0x01, %r9, %r11
movq $0x7fffffffffffffff, %r12
imulq $19, %r11
andq %r12, %r9
# Sub modulus (if overflow)
addq %r11, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size fe_add,.-fe_add
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_neg
.type fe_neg,@function
.align 16
fe_neg:
#else
.section __TEXT,__text
.globl _fe_neg
.p2align 4
_fe_neg:
#endif /* __APPLE__ */
movq $-19, %rdx
movq $-1, %rax
movq $-1, %rcx
movq $0x7fffffffffffffff, %r8
subq (%rsi), %rdx
sbbq 8(%rsi), %rax
sbbq 16(%rsi), %rcx
sbbq 24(%rsi), %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_neg,.-fe_neg
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_cmov
.type fe_cmov,@function
.align 16
fe_cmov:
#else
.section __TEXT,__text
.globl _fe_cmov
.p2align 4
_fe_cmov:
#endif /* __APPLE__ */
cmpl $0x01, %edx
movq (%rdi), %rcx
movq 8(%rdi), %r8
movq 16(%rdi), %r9
movq 24(%rdi), %r10
cmoveq (%rsi), %rcx
cmoveq 8(%rsi), %r8
cmoveq 16(%rsi), %r9
cmoveq 24(%rsi), %r10
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
repz retq
#ifndef __APPLE__
.size fe_cmov,.-fe_cmov
#endif /* __APPLE__ */
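/*
 * fe_cmov copies b over a only when the flag equals one, but always
 * performs the same loads, cmove selects and stores, so the memory
 * access pattern is independent of the (possibly secret) flag.
 */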
#ifndef __APPLE__
.text
.globl fe_isnonzero
.type fe_isnonzero,@function
.align 16
fe_isnonzero:
#else
.section __TEXT,__text
.globl _fe_isnonzero
.p2align 4
_fe_isnonzero:
#endif /* __APPLE__ */
movq $0x7fffffffffffffff, %r10
movq (%rdi), %rax
movq 8(%rdi), %rdx
movq 16(%rdi), %rcx
movq 24(%rdi), %r8
addq $19, %rax
adcq $0x00, %rdx
adcq $0x00, %rcx
adcq $0x00, %r8
shrq $63, %r8
imulq $19, %r8, %r9
movq (%rdi), %rax
movq 8(%rdi), %rdx
movq 16(%rdi), %rcx
movq 24(%rdi), %r8
addq %r9, %rax
adcq $0x00, %rdx
adcq $0x00, %rcx
adcq $0x00, %r8
andq %r10, %r8
orq %rdx, %rax
orq %rcx, %rax
orq %r8, %rax
repz retq
#ifndef __APPLE__
.size fe_isnonzero,.-fe_isnonzero
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_isnegative
.type fe_isnegative,@function
.align 16
fe_isnegative:
#else
.section __TEXT,__text
.globl _fe_isnegative
.p2align 4
_fe_isnegative:
#endif /* __APPLE__ */
movq $0x7fffffffffffffff, %r11
movq (%rdi), %rdx
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
movq %rdx, %rax
addq $19, %rdx
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
shrq $63, %r9
imulq $19, %r9, %r10
addq %r10, %rax
andq $0x01, %rax
repz retq
#ifndef __APPLE__
.size fe_isnegative,.-fe_isnegative
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_cmov_table
.type fe_cmov_table,@function
.align 16
fe_cmov_table:
#else
.section __TEXT,__text
.globl _fe_cmov_table
.p2align 4
_fe_cmov_table:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
movsbq %cl, %rax
cdq
xorb %dl, %al
subb %dl, %al
movb %al, %r15b
movq $0x01, %rax
xorq %rdx, %rdx
xorq %r8, %r8
xorq %r9, %r9
movq $0x01, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
cmpb $0x01, %r15b
movq (%rsi), %r14
cmoveq %r14, %rax
movq 8(%rsi), %r14
cmoveq %r14, %rdx
movq 16(%rsi), %r14
cmoveq %r14, %r8
movq 24(%rsi), %r14
cmoveq %r14, %r9
movq 32(%rsi), %r14
cmoveq %r14, %r10
movq 40(%rsi), %r14
cmoveq %r14, %r11
movq 48(%rsi), %r14
cmoveq %r14, %r12
movq 56(%rsi), %r14
cmoveq %r14, %r13
cmpb $2, %r15b
movq 96(%rsi), %r14
cmoveq %r14, %rax
movq 104(%rsi), %r14
cmoveq %r14, %rdx
movq 112(%rsi), %r14
cmoveq %r14, %r8
movq 120(%rsi), %r14
cmoveq %r14, %r9
movq 128(%rsi), %r14
cmoveq %r14, %r10
movq 136(%rsi), %r14
cmoveq %r14, %r11
movq 144(%rsi), %r14
cmoveq %r14, %r12
movq 152(%rsi), %r14
cmoveq %r14, %r13
cmpb $3, %r15b
movq 192(%rsi), %r14
cmoveq %r14, %rax
movq 200(%rsi), %r14
cmoveq %r14, %rdx
movq 208(%rsi), %r14
cmoveq %r14, %r8
movq 216(%rsi), %r14
cmoveq %r14, %r9
movq 224(%rsi), %r14
cmoveq %r14, %r10
movq 232(%rsi), %r14
cmoveq %r14, %r11
movq 240(%rsi), %r14
cmoveq %r14, %r12
movq 248(%rsi), %r14
cmoveq %r14, %r13
cmpb $4, %r15b
movq 288(%rsi), %r14
cmoveq %r14, %rax
movq 296(%rsi), %r14
cmoveq %r14, %rdx
movq 304(%rsi), %r14
cmoveq %r14, %r8
movq 312(%rsi), %r14
cmoveq %r14, %r9
movq 320(%rsi), %r14
cmoveq %r14, %r10
movq 328(%rsi), %r14
cmoveq %r14, %r11
movq 336(%rsi), %r14
cmoveq %r14, %r12
movq 344(%rsi), %r14
cmoveq %r14, %r13
cmpb $5, %r15b
movq 384(%rsi), %r14
cmoveq %r14, %rax
movq 392(%rsi), %r14
cmoveq %r14, %rdx
movq 400(%rsi), %r14
cmoveq %r14, %r8
movq 408(%rsi), %r14
cmoveq %r14, %r9
movq 416(%rsi), %r14
cmoveq %r14, %r10
movq 424(%rsi), %r14
cmoveq %r14, %r11
movq 432(%rsi), %r14
cmoveq %r14, %r12
movq 440(%rsi), %r14
cmoveq %r14, %r13
cmpb $6, %r15b
movq 480(%rsi), %r14
cmoveq %r14, %rax
movq 488(%rsi), %r14
cmoveq %r14, %rdx
movq 496(%rsi), %r14
cmoveq %r14, %r8
movq 504(%rsi), %r14
cmoveq %r14, %r9
movq 512(%rsi), %r14
cmoveq %r14, %r10
movq 520(%rsi), %r14
cmoveq %r14, %r11
movq 528(%rsi), %r14
cmoveq %r14, %r12
movq 536(%rsi), %r14
cmoveq %r14, %r13
cmpb $7, %r15b
movq 576(%rsi), %r14
cmoveq %r14, %rax
movq 584(%rsi), %r14
cmoveq %r14, %rdx
movq 592(%rsi), %r14
cmoveq %r14, %r8
movq 600(%rsi), %r14
cmoveq %r14, %r9
movq 608(%rsi), %r14
cmoveq %r14, %r10
movq 616(%rsi), %r14
cmoveq %r14, %r11
movq 624(%rsi), %r14
cmoveq %r14, %r12
movq 632(%rsi), %r14
cmoveq %r14, %r13
cmpb $8, %r15b
movq 672(%rsi), %r14
cmoveq %r14, %rax
movq 680(%rsi), %r14
cmoveq %r14, %rdx
movq 688(%rsi), %r14
cmoveq %r14, %r8
movq 696(%rsi), %r14
cmoveq %r14, %r9
movq 704(%rsi), %r14
cmoveq %r14, %r10
movq 712(%rsi), %r14
cmoveq %r14, %r11
movq 720(%rsi), %r14
cmoveq %r14, %r12
movq 728(%rsi), %r14
cmoveq %r14, %r13
cmpb $0x00, %cl
movq %rax, %r14
cmovlq %r10, %rax
cmovlq %r14, %r10
movq %rdx, %r14
cmovlq %r11, %rdx
cmovlq %r14, %r11
movq %r8, %r14
cmovlq %r12, %r8
cmovlq %r14, %r12
movq %r9, %r14
cmovlq %r13, %r9
cmovlq %r14, %r13
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
movq %r12, 48(%rdi)
movq %r13, 56(%rdi)
xorq %rax, %rax
xorq %rdx, %rdx
xorq %r8, %r8
xorq %r9, %r9
cmpb $0x01, %r15b
movq 64(%rsi), %r14
cmoveq %r14, %rax
movq 72(%rsi), %r14
cmoveq %r14, %rdx
movq 80(%rsi), %r14
cmoveq %r14, %r8
movq 88(%rsi), %r14
cmoveq %r14, %r9
cmpb $2, %r15b
movq 160(%rsi), %r14
cmoveq %r14, %rax
movq 168(%rsi), %r14
cmoveq %r14, %rdx
movq 176(%rsi), %r14
cmoveq %r14, %r8
movq 184(%rsi), %r14
cmoveq %r14, %r9
cmpb $3, %r15b
movq 256(%rsi), %r14
cmoveq %r14, %rax
movq 264(%rsi), %r14
cmoveq %r14, %rdx
movq 272(%rsi), %r14
cmoveq %r14, %r8
movq 280(%rsi), %r14
cmoveq %r14, %r9
cmpb $4, %r15b
movq 352(%rsi), %r14
cmoveq %r14, %rax
movq 360(%rsi), %r14
cmoveq %r14, %rdx
movq 368(%rsi), %r14
cmoveq %r14, %r8
movq 376(%rsi), %r14
cmoveq %r14, %r9
cmpb $5, %r15b
movq 448(%rsi), %r14
cmoveq %r14, %rax
movq 456(%rsi), %r14
cmoveq %r14, %rdx
movq 464(%rsi), %r14
cmoveq %r14, %r8
movq 472(%rsi), %r14
cmoveq %r14, %r9
cmpb $6, %r15b
movq 544(%rsi), %r14
cmoveq %r14, %rax
movq 552(%rsi), %r14
cmoveq %r14, %rdx
movq 560(%rsi), %r14
cmoveq %r14, %r8
movq 568(%rsi), %r14
cmoveq %r14, %r9
cmpb $7, %r15b
movq 640(%rsi), %r14
cmoveq %r14, %rax
movq 648(%rsi), %r14
cmoveq %r14, %rdx
movq 656(%rsi), %r14
cmoveq %r14, %r8
movq 664(%rsi), %r14
cmoveq %r14, %r9
cmpb $8, %r15b
movq 736(%rsi), %r14
cmoveq %r14, %rax
movq 744(%rsi), %r14
cmoveq %r14, %rdx
movq 752(%rsi), %r14
cmoveq %r14, %r8
movq 760(%rsi), %r14
cmoveq %r14, %r9
movq $-19, %r10
movq $-1, %r11
movq $-1, %r12
movq $0x7fffffffffffffff, %r13
subq %rax, %r10
sbbq %rdx, %r11
sbbq %r8, %r12
sbbq %r9, %r13
cmpb $0x00, %cl
cmovlq %r10, %rax
cmovlq %r11, %rdx
cmovlq %r12, %r8
cmovlq %r13, %r9
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_cmov_table,.-fe_cmov_table
#endif /* __APPLE__ */
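/*
 * Constant-time lookup of one of eight 96-byte precomputed entries
 * (the (y+x, y-x, 2dxy) triples used by the Ed25519 scalar multiply).
 * The absolute value of the signed index b is formed branchlessly
 * (movsbq/cdq/xor/sub), every entry is read and cmove-selected against
 * |b| = 1..8, and for negative b the (y+x)/(y-x) halves are swapped
 * while the third coordinate is negated as p - t (cmovl on the sign of
 * b), so timing and access pattern never depend on b.
 */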
#ifndef __APPLE__
.text
.globl fe_mul
.type fe_mul,@function
.align 16
fe_mul:
#else
.section __TEXT,__text
.globl _fe_mul
.p2align 4
_fe_mul:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_mul_p(%rip)
#else
jmpq *_fe_mul_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_mul,.-fe_mul
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_sq
.type fe_sq,@function
.align 16
fe_sq:
#else
.section __TEXT,__text
.globl _fe_sq
.p2align 4
_fe_sq:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_sq_p(%rip)
#else
jmpq *_fe_sq_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_sq,.-fe_sq
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_mul121666
.type fe_mul121666,@function
.align 16
fe_mul121666:
#else
.section __TEXT,__text
.globl _fe_mul121666
.p2align 4
_fe_mul121666:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_mul121666_p(%rip)
#else
jmpq *_fe_mul121666_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_mul121666,.-fe_mul121666
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_invert
.type fe_invert,@function
.align 16
fe_invert:
#else
.section __TEXT,__text
.globl _fe_invert
.p2align 4
_fe_invert:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_invert_p(%rip)
#else
jmpq *_fe_invert_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_invert,.-fe_invert
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl curve25519
.type curve25519,@function
.align 16
curve25519:
#else
.section __TEXT,__text
.globl _curve25519
.p2align 4
_curve25519:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *curve25519_p(%rip)
#else
jmpq *_curve25519_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size curve25519,.-curve25519
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl fe_sq2
.type fe_sq2,@function
.align 16
fe_sq2:
#else
.section __TEXT,__text
.globl _fe_sq2
.p2align 4
_fe_sq2:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_sq2_p(%rip)
#else
jmpq *_fe_sq2_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_sq2,.-fe_sq2
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl fe_pow22523
.type fe_pow22523,@function
.align 16
fe_pow22523:
#else
.section __TEXT,__text
.globl _fe_pow22523
.p2align 4
_fe_pow22523:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *fe_pow22523_p(%rip)
#else
jmpq *_fe_pow22523_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size fe_pow22523,.-fe_pow22523
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p2
.type ge_p1p1_to_p2,@function
.align 16
ge_p1p1_to_p2:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p2
.p2align 4
_ge_p1p1_to_p2:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_p1p1_to_p2_p(%rip)
#else
jmpq *_ge_p1p1_to_p2_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_p1p1_to_p2,.-ge_p1p1_to_p2
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p3
.type ge_p1p1_to_p3,@function
.align 16
ge_p1p1_to_p3:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p3
.p2align 4
_ge_p1p1_to_p3:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_p1p1_to_p3_p(%rip)
#else
jmpq *_ge_p1p1_to_p3_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_p1p1_to_p3,.-ge_p1p1_to_p3
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_p2_dbl
.type ge_p2_dbl,@function
.align 16
ge_p2_dbl:
#else
.section __TEXT,__text
.globl _ge_p2_dbl
.p2align 4
_ge_p2_dbl:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_p2_dbl_p(%rip)
#else
jmpq *_ge_p2_dbl_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_p2_dbl,.-ge_p2_dbl
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_madd
.type ge_madd,@function
.align 16
ge_madd:
#else
.section __TEXT,__text
.globl _ge_madd
.p2align 4
_ge_madd:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_madd_p(%rip)
#else
jmpq *_ge_madd_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_madd,.-ge_madd
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_msub
.type ge_msub,@function
.align 16
ge_msub:
#else
.section __TEXT,__text
.globl _ge_msub
.p2align 4
_ge_msub:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_msub_p(%rip)
#else
jmpq *_ge_msub_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_msub,.-ge_msub
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_add
.type ge_add,@function
.align 16
ge_add:
#else
.section __TEXT,__text
.globl _ge_add
.p2align 4
_ge_add:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_add_p(%rip)
#else
jmpq *_ge_add_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_add,.-ge_add
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl ge_sub
.type ge_sub,@function
.align 16
ge_sub:
#else
.section __TEXT,__text
.globl _ge_sub
.p2align 4
_ge_sub:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *ge_sub_p(%rip)
#else
jmpq *_ge_sub_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size ge_sub,.-ge_sub
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl sc_reduce
.type sc_reduce,@function
.align 16
sc_reduce:
#else
.section __TEXT,__text
.globl _sc_reduce
.p2align 4
_sc_reduce:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *sc_reduce_p(%rip)
#else
jmpq *_sc_reduce_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size sc_reduce,.-sc_reduce
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl sc_muladd
.type sc_muladd,@function
.align 16
sc_muladd:
#else
.section __TEXT,__text
.globl _sc_muladd
.p2align 4
_sc_muladd:
#endif /* __APPLE__ */
#ifndef __APPLE__
jmpq *sc_muladd_p(%rip)
#else
jmpq *_sc_muladd_p(%rip)
#endif /* __APPLE__ */
#ifndef __APPLE__
.size sc_muladd,.-sc_muladd
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#endif /* HAVE_ED25519 */
#ifndef __APPLE__
.data
.type cpuFlagsSet, @object
.size cpuFlagsSet,4
cpuFlagsSet:
.long 0
#else
.section __DATA,__data
.p2align 3
_cpuFlagsSet:
.long 0
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type intelFlags, @object
.size intelFlags,4
intelFlags:
.long 0
#else
.section __DATA,__data
.p2align 3
_intelFlags:
.long 0
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type fe_mul_p, @object
.size fe_mul_p,8
fe_mul_p:
.quad fe_mul_x64
#else
.section __DATA,__data
.p2align 3
_fe_mul_p:
.quad _fe_mul_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type fe_sq_p, @object
.size fe_sq_p,8
fe_sq_p:
.quad fe_sq_x64
#else
.section __DATA,__data
.p2align 3
_fe_sq_p:
.quad _fe_sq_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type fe_mul121666_p, @object
.size fe_mul121666_p,8
fe_mul121666_p:
.quad fe_mul121666_x64
#else
.section __DATA,__data
.p2align 3
_fe_mul121666_p:
.quad _fe_mul121666_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type fe_invert_p, @object
.size fe_invert_p,8
fe_invert_p:
.quad fe_invert_x64
#else
.section __DATA,__data
.p2align 3
_fe_invert_p:
.quad _fe_invert_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type curve25519_p, @object
.size curve25519_p,8
curve25519_p:
.quad curve25519_x64
#else
.section __DATA,__data
.p2align 3
_curve25519_p:
.quad _curve25519_x64
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.data
.type fe_sq2_p, @object
.size fe_sq2_p,8
fe_sq2_p:
.quad fe_sq2_x64
#else
.section __DATA,__data
.p2align 3
_fe_sq2_p:
.quad _fe_sq2_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type fe_pow22523_p, @object
.size fe_pow22523_p,8
fe_pow22523_p:
.quad fe_pow22523_x64
#else
.section __DATA,__data
.p2align 3
_fe_pow22523_p:
.quad _fe_pow22523_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_p1p1_to_p2_p, @object
.size ge_p1p1_to_p2_p,8
ge_p1p1_to_p2_p:
.quad ge_p1p1_to_p2_x64
#else
.section __DATA,__data
.p2align 3
_ge_p1p1_to_p2_p:
.quad _ge_p1p1_to_p2_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_p1p1_to_p3_p, @object
.size ge_p1p1_to_p3_p,8
ge_p1p1_to_p3_p:
.quad ge_p1p1_to_p3_x64
#else
.section __DATA,__data
.p2align 3
_ge_p1p1_to_p3_p:
.quad _ge_p1p1_to_p3_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_p2_dbl_p, @object
.size ge_p2_dbl_p,8
ge_p2_dbl_p:
.quad ge_p2_dbl_x64
#else
.section __DATA,__data
.p2align 3
_ge_p2_dbl_p:
.quad _ge_p2_dbl_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_madd_p, @object
.size ge_madd_p,8
ge_madd_p:
.quad ge_madd_x64
#else
.section __DATA,__data
.p2align 3
_ge_madd_p:
.quad _ge_madd_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_msub_p, @object
.size ge_msub_p,8
ge_msub_p:
.quad ge_msub_x64
#else
.section __DATA,__data
.p2align 3
_ge_msub_p:
.quad _ge_msub_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_add_p, @object
.size ge_add_p,8
ge_add_p:
.quad ge_add_x64
#else
.section __DATA,__data
.p2align 3
_ge_add_p:
.quad _ge_add_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type ge_sub_p, @object
.size ge_sub_p,8
ge_sub_p:
.quad ge_sub_x64
#else
.section __DATA,__data
.p2align 3
_ge_sub_p:
.quad _ge_sub_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type sc_reduce_p, @object
.size sc_reduce_p,8
sc_reduce_p:
.quad sc_reduce_x64
#else
.section __DATA,__data
.p2align 3
_sc_reduce_p:
.quad _sc_reduce_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
.type sc_muladd_p, @object
.size sc_muladd_p,8
sc_muladd_p:
.quad sc_muladd_x64
#else
.section __DATA,__data
.p2align 3
_sc_muladd_p:
.quad _sc_muladd_x64
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifndef __APPLE__
.text
.globl fe_mul_x64
.type fe_mul_x64,@function
.align 16
fe_mul_x64:
#else
.section __TEXT,__text
.globl _fe_mul_x64
.p2align 4
_fe_mul_x64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rcx
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbx, %r11
movq %rdx, %rbx
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %r8
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbx, %r8
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
movq $0x7fffffffffffffff, %rbx
movq %r11, %rax
sarq $63, %rax
andq $19, %rax
andq %rbx, %r11
addq %rax, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_mul_x64,.-fe_mul_x64
#endif /* __APPLE__ */
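/*
 * Reduction in fe_mul_x64: since 2^256 = 38 (mod p), the upper four
 * product limbs are multiplied by 38 and folded into the lower four;
 * the overflow into bit 255 is folded once more via shldq/imulq $19,
 * and finally bit 255 itself is cleared and exchanged for +19, leaving
 * a partially reduced four-limb result.
 */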
#ifndef __APPLE__
.text
.globl fe_sq_x64
.type fe_sq_x64,@function
.align 16
fe_sq_x64:
#else
.section __TEXT,__text
.globl _fe_sq_x64
.p2align 4
_fe_sq_x64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# Square
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r8
movq %rdx, %r9
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# Double
xorq %r14, %r14
addq %r8, %r8
adcq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq $0x00, %r14
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %r15
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %r15, %r8
adcq %rax, %r9
adcq $0x00, %rdx
movq %rdx, %r15
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %r15, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r15
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %r13
adcq %rdx, %r14
addq %r15, %r12
adcq $0x00, %r13
adcq $0x00, %r14
movq $38, %rax
mulq %r14
addq %rax, %r10
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r15
shldq $0x01, %r10, %rdx
imulq $19, %rdx, %rdx
andq %r15, %r10
movq %rdx, %r15
movq $38, %rax
mulq %r11
xorq %r11, %r11
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r11
mulq %r12
xorq %r12, %r12
addq %rax, %r8
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
adcq %rdx, %r13
addq %r15, %rcx
adcq %r11, %r8
adcq %r12, %r9
adcq %r13, %r10
movq $0x7fffffffffffffff, %r15
movq %r10, %rax
sarq $63, %rax
andq $19, %rax
andq %r15, %r10
addq %rax, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
# Store
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_sq_x64,.-fe_sq_x64
#endif /* __APPLE__ */
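/*
 * fe_sq_x64 exploits symmetry: the six off-diagonal products A[i]*A[j]
 * are computed once and doubled with a single carry chain, then the
 * four diagonal squares are added in; the 2^256 = 38 folding matches
 * fe_mul_x64 above.
 */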
#ifndef __APPLE__
.text
.globl fe_sq_n_x64
.type fe_sq_n_x64,@function
.align 16
fe_sq_n_x64:
#else
.section __TEXT,__text
.globl _fe_sq_n_x64
.p2align 4
_fe_sq_n_x64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rcx
L_fe_sq_n_x64:
# Square
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %r8
movq %rdx, %rbx
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rbx, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rbx, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbx
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %r14
adcq %rdx, %r15
addq %rbx, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbx, %r11
movq %rdx, %rbx
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %r8
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbx, %r8
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
decb %cl
jnz L_fe_sq_n_x64
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_sq_n_x64,.-fe_sq_n_x64
#endif /* __APPLE__ */
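/*
 * fe_sq_n_x64 squares in place n times.  Only %cl of the count is
 * consulted (decb/jnz), which is fine for the callers in this file:
 * fe_invert_x64 below passes at most 0x63 (99) iterations.
 */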
#ifndef __APPLE__
.text
.globl fe_mul121666_x64
.type fe_mul121666_x64,@function
.align 16
fe_mul121666_x64:
#else
.section __TEXT,__text
.globl _fe_mul121666_x64
.p2align 4
_fe_mul121666_x64:
#endif /* __APPLE__ */
pushq %r12
# Multiply by 121666
movq $0x1db42, %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, %r8
movq %rdx, %r9
movq $0x1db42, %rax
mulq 8(%rsi)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
movq $0x1db42, %rax
mulq 16(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
movq $0x1db42, %rax
mulq 24(%rsi)
movq $0x7fffffffffffffff, %rcx
addq %rax, %r11
adcq %rdx, %r12
shldq $0x01, %r11, %r12
andq %rcx, %r11
movq $19, %rax
mulq %r12
addq %rax, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size fe_mul121666_x64,.-fe_mul121666_x64
#endif /* __APPLE__ */
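/*
 * Multiply by the curve constant 121666 = 0x1db42 = (486662 + 2)/4,
 * used in the x-only Montgomery-ladder doubling step.
 */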
#ifndef __APPLE__
.text
.globl fe_invert_x64
.type fe_invert_x64,@function
.align 16
fe_invert_x64:
#else
.section __TEXT,__text
.globl _fe_invert_x64
.p2align 4
_fe_invert_x64:
#endif /* __APPLE__ */
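# Fermat inversion: computes a^(p-2) mod p with the addition chain
# below -- 254 squarings (fe_sq_x64 / fe_sq_n_x64) and 11
# multiplications -- using three 32-byte temporaries on the stack.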
subq $0x90, %rsp
# Invert
movq %rdi, 128(%rsp)
movq %rsi, 136(%rsp)
movq %rsp, %rdi
movq 136(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq 136(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $0x63, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
movq 128(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq 136(%rsp), %rsi
movq 128(%rsp), %rdi
addq $0x90, %rsp
repz retq
#ifndef __APPLE__
.size fe_invert_x64,.-fe_invert_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl curve25519_x64
.type curve25519_x64,@function
.align 16
curve25519_x64:
#else
.section __TEXT,__text
.globl _curve25519_x64
.p2align 4
_curve25519_x64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
subq $0xb0, %rsp
xorq %rbx, %rbx
movq %rdi, 168(%rsp)
# Set one
movq $0x01, (%rdi)
movq $0x00, 8(%rdi)
movq $0x00, 16(%rdi)
movq $0x00, 24(%rdi)
# Set zero
movq $0x00, (%rsp)
movq $0x00, 8(%rsp)
movq $0x00, 16(%rsp)
movq $0x00, 24(%rsp)
# Set one
movq $0x01, 32(%rsp)
movq $0x00, 40(%rsp)
movq $0x00, 48(%rsp)
movq $0x00, 56(%rsp)
# Copy
movq (%r8), %rcx
movq 8(%r8), %r9
movq 16(%r8), %r10
movq 24(%r8), %r11
movq %rcx, 64(%rsp)
movq %r9, 72(%rsp)
movq %r10, 80(%rsp)
movq %r11, 88(%rsp)
movq $0xfe, %r9
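# Montgomery ladder over scalar bits 254..0.  %rbx remembers the
# previous bit, so each iteration performs one constant-time,
# XOR-mask-based conditional swap of the two working points before
# the ladder step.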
L_curve25519_x64_bits:
movq %r9, 160(%rsp)
movq %r9, %rcx
andq $63, %rcx
shrq $6, %r9
movq (%rsi,%r9,8), %rbp
shrq %cl, %rbp
andq $0x01, %rbp
xorq %rbp, %rbx
negq %rbx
# Conditional Swap
movq (%rdi), %rcx
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
xorq 64(%rsp), %rcx
xorq 72(%rsp), %r9
xorq 80(%rsp), %r10
xorq 88(%rsp), %r11
andq %rbx, %rcx
andq %rbx, %r9
andq %rbx, %r10
andq %rbx, %r11
xorq %rcx, (%rdi)
xorq %r9, 8(%rdi)
xorq %r10, 16(%rdi)
xorq %r11, 24(%rdi)
xorq %rcx, 64(%rsp)
xorq %r9, 72(%rsp)
xorq %r10, 80(%rsp)
xorq %r11, 88(%rsp)
# Conditional Swap
movq (%rsp), %rcx
movq 8(%rsp), %r9
movq 16(%rsp), %r10
movq 24(%rsp), %r11
xorq 32(%rsp), %rcx
xorq 40(%rsp), %r9
xorq 48(%rsp), %r10
xorq 56(%rsp), %r11
andq %rbx, %rcx
andq %rbx, %r9
andq %rbx, %r10
andq %rbx, %r11
xorq %rcx, (%rsp)
xorq %r9, 8(%rsp)
xorq %r10, 16(%rsp)
xorq %r11, 24(%rsp)
xorq %rcx, 32(%rsp)
xorq %r9, 40(%rsp)
xorq %r10, 48(%rsp)
xorq %r11, 56(%rsp)
movq %rbp, %rbx
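# Ladder step: form the sums and differences of the two working points,
# then combine them with the multiplies and squarings that follow to
# perform the Montgomery differential add-and-double.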
# Add-Sub
# Add
movq (%rdi), %rcx
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq %rcx, %r12
addq (%rsp), %rcx
movq %r9, %r13
adcq 8(%rsp), %r9
movq %r10, %r14
adcq 16(%rsp), %r10
movq %r11, %r15
adcq 24(%rsp), %r11
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r11, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r11
# Sub modulus (if overflow)
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Sub
subq (%rsp), %r12
sbbq 8(%rsp), %r13
sbbq 16(%rsp), %r14
sbbq 24(%rsp), %r15
sbbq %rax, %rax
shldq $0x01, %r15, %rax
imulq $-19, %rax
andq %rdx, %r15
# Add modulus (if underflow)
subq %rax, %r12
sbbq $0x00, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
movq %rcx, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 128(%rsp)
movq %r13, 136(%rsp)
movq %r14, 144(%rsp)
movq %r15, 152(%rsp)
# Add-Sub
# Add
movq 64(%rsp), %rcx
movq 72(%rsp), %r9
movq 80(%rsp), %r10
movq 88(%rsp), %r11
movq %rcx, %r12
addq 32(%rsp), %rcx
movq %r9, %r13
adcq 40(%rsp), %r9
movq %r10, %r14
adcq 48(%rsp), %r10
movq %r11, %r15
adcq 56(%rsp), %r11
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r11, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r11
# Sub modulus (if overflow)
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Sub
subq 32(%rsp), %r12
sbbq 40(%rsp), %r13
sbbq 48(%rsp), %r14
sbbq 56(%rsp), %r15
sbbq %rax, %rax
shldq $0x01, %r15, %rax
imulq $-19, %rax
andq %rdx, %r15
# Add modulus (if underflow)
subq %rax, %r12
sbbq $0x00, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
movq %rcx, 32(%rsp)
movq %r9, 40(%rsp)
movq %r10, 48(%rsp)
movq %r11, 56(%rsp)
movq %r12, 96(%rsp)
movq %r13, 104(%rsp)
movq %r14, 112(%rsp)
movq %r15, 120(%rsp)
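# 4x4-limb schoolbook multiply giving an 8-limb product; the high four
# limbs are folded back with a multiply by 38, since 2^256 == 38
# (mod 2^255 - 19), and bit 255 is folded with a multiply by 19.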
# Multiply
# A[0] * B[0]
movq 128(%rsp), %rax
mulq 32(%rsp)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 136(%rsp), %rax
mulq 32(%rsp)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq 128(%rsp), %rax
mulq 40(%rsp)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 144(%rsp), %rax
mulq 32(%rsp)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 136(%rsp), %rax
mulq 40(%rsp)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq 128(%rsp), %rax
mulq 48(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 152(%rsp), %rax
mulq 32(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 144(%rsp), %rax
mulq 40(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 136(%rsp), %rax
mulq 48(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq 128(%rsp), %rax
mulq 56(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 152(%rsp), %rax
mulq 40(%rsp)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 144(%rsp), %rax
mulq 48(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 136(%rsp), %rax
mulq 56(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 152(%rsp), %rax
mulq 48(%rsp)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 144(%rsp), %rax
mulq 56(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 152(%rsp), %rax
mulq 56(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 32(%rsp)
movq %r9, 40(%rsp)
movq %r10, 48(%rsp)
movq %r11, 56(%rsp)
# Multiply
# A[0] * B[0]
movq (%rdi), %rax
mulq 96(%rsp)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rdi), %rax
mulq 96(%rsp)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq (%rdi), %rax
mulq 104(%rsp)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 16(%rdi), %rax
mulq 96(%rsp)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 8(%rdi), %rax
mulq 104(%rsp)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq (%rdi), %rax
mulq 112(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 24(%rdi), %rax
mulq 96(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 16(%rdi), %rax
mulq 104(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 8(%rdi), %rax
mulq 112(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq (%rdi), %rax
mulq 120(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 24(%rdi), %rax
mulq 104(%rsp)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 16(%rdi), %rax
mulq 112(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 8(%rdi), %rax
mulq 120(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 24(%rdi), %rax
mulq 112(%rsp)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 16(%rdi), %rax
mulq 120(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 24(%rdi), %rax
mulq 120(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, (%rsp)
movq %r9, 8(%rsp)
movq %r10, 16(%rsp)
movq %r11, 24(%rsp)
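# Squaring uses the same reduction but needs fewer partial products: the
# off-diagonal terms are computed once and doubled before the diagonal
# squares are added in.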
# Square
# A[0] * A[1]
movq 128(%rsp), %rax
mulq 136(%rsp)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq 128(%rsp), %rax
mulq 144(%rsp)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq 128(%rsp), %rax
mulq 152(%rsp)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 136(%rsp), %rax
mulq 144(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 136(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 144(%rsp), %rax
mulq 152(%rsp)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq 128(%rsp), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %rbp
# A[1] * A[1]
movq 136(%rsp), %rax
mulq %rax
addq %rbp, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbp
# A[2] * A[2]
movq 144(%rsp), %rax
mulq %rax
addq %rbp, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbp
# A[3] * A[3]
movq 152(%rsp), %rax
mulq %rax
addq %rax, %r14
adcq %rdx, %r15
addq %rbp, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 96(%rsp)
movq %r9, 104(%rsp)
movq %r10, 112(%rsp)
movq %r11, 120(%rsp)
# Square
# A[0] * A[1]
movq (%rdi), %rax
mulq 8(%rdi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq (%rdi), %rax
mulq 16(%rdi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq (%rdi), %rax
mulq 24(%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 8(%rdi), %rax
mulq 16(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 8(%rdi), %rax
mulq 24(%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 16(%rdi), %rax
mulq 24(%rdi)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq (%rdi), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %rbp
# A[1] * A[1]
movq 8(%rdi), %rax
mulq %rax
addq %rbp, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbp
# A[2] * A[2]
movq 16(%rdi), %rax
mulq %rax
addq %rbp, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbp
# A[3] * A[3]
movq 24(%rdi), %rax
mulq %rax
addq %rax, %r14
adcq %rdx, %r15
addq %rbp, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 128(%rsp)
movq %r9, 136(%rsp)
movq %r10, 144(%rsp)
movq %r11, 152(%rsp)
# Add-Sub
# Add
movq (%rsp), %rcx
movq 8(%rsp), %r9
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rcx, %r12
addq 32(%rsp), %rcx
movq %r9, %r13
adcq 40(%rsp), %r9
movq %r10, %r14
adcq 48(%rsp), %r10
movq %r11, %r15
adcq 56(%rsp), %r11
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r11, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r11
# Sub modulus (if overflow)
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Sub
subq 32(%rsp), %r12
sbbq 40(%rsp), %r13
sbbq 48(%rsp), %r14
sbbq 56(%rsp), %r15
sbbq %rax, %rax
shldq $0x01, %r15, %rax
imulq $-19, %rax
andq %rdx, %r15
# Add modulus (if underflow)
subq %rax, %r12
sbbq $0x00, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
movq %rcx, 64(%rsp)
movq %r9, 72(%rsp)
movq %r10, 80(%rsp)
movq %r11, 88(%rsp)
movq %r12, 32(%rsp)
movq %r13, 40(%rsp)
movq %r14, 48(%rsp)
movq %r15, 56(%rsp)
# Multiply
# A[0] * B[0]
movq 96(%rsp), %rax
mulq 128(%rsp)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 104(%rsp), %rax
mulq 128(%rsp)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq 96(%rsp), %rax
mulq 136(%rsp)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 112(%rsp), %rax
mulq 128(%rsp)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 104(%rsp), %rax
mulq 136(%rsp)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq 96(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 120(%rsp), %rax
mulq 128(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 112(%rsp), %rax
mulq 136(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 104(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq 96(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 120(%rsp), %rax
mulq 136(%rsp)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 112(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 104(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 120(%rsp), %rax
mulq 144(%rsp)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 112(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 120(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
# Sub
movq 128(%rsp), %rcx
movq 136(%rsp), %r9
movq 144(%rsp), %r10
movq 152(%rsp), %r11
subq 96(%rsp), %rcx
sbbq 104(%rsp), %r9
sbbq 112(%rsp), %r10
sbbq 120(%rsp), %r11
sbbq %rax, %rax
shldq $0x01, %r11, %rax
movq $0x7fffffffffffffff, %rdx
imulq $-19, %rax
andq %rdx, %r11
# Add modulus (if underflow)
subq %rax, %rcx
sbbq $0x00, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
movq %rcx, 128(%rsp)
movq %r9, 136(%rsp)
movq %r10, 144(%rsp)
movq %r11, 152(%rsp)
# Square
# A[0] * A[1]
movq 32(%rsp), %rax
mulq 40(%rsp)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq 32(%rsp), %rax
mulq 48(%rsp)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq 32(%rsp), %rax
mulq 56(%rsp)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 40(%rsp), %rax
mulq 48(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 40(%rsp), %rax
mulq 56(%rsp)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 48(%rsp), %rax
mulq 56(%rsp)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq 32(%rsp), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %rbp
# A[1] * A[1]
movq 40(%rsp), %rax
mulq %rax
addq %rbp, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbp
# A[2] * A[2]
movq 48(%rsp), %rax
mulq %rax
addq %rbp, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbp
# A[3] * A[3]
movq 56(%rsp), %rax
mulq %rax
addq %rax, %r14
adcq %rdx, %r15
addq %rbp, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 32(%rsp)
movq %r9, 40(%rsp)
movq %r10, 48(%rsp)
movq %r11, 56(%rsp)
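# 121666 = 0x1db42 = (A + 2)/4 for the curve coefficient A = 486662; this
# is the a24 constant from the Montgomery doubling formula.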
# Multiply by 121666
movq $0x1db42, %rax
mulq 128(%rsp)
xorq %r10, %r10
movq %rax, %rcx
movq %rdx, %r9
movq $0x1db42, %rax
mulq 136(%rsp)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
movq $0x1db42, %rax
mulq 144(%rsp)
xorq %r13, %r13
addq %rax, %r10
adcq %rdx, %r11
movq $0x1db42, %rax
mulq 152(%rsp)
movq $0x7fffffffffffffff, %r12
addq %rax, %r11
adcq %rdx, %r13
shldq $0x01, %r11, %r13
andq %r12, %r11
movq $19, %rax
mulq %r13
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
movq %rcx, (%rsp)
movq %r9, 8(%rsp)
movq %r10, 16(%rsp)
movq %r11, 24(%rsp)
# Square
# A[0] * A[1]
movq 64(%rsp), %rax
mulq 72(%rsp)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq 64(%rsp), %rax
mulq 80(%rsp)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq 64(%rsp), %rax
mulq 88(%rsp)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 72(%rsp), %rax
mulq 80(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 72(%rsp), %rax
mulq 88(%rsp)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 80(%rsp), %rax
mulq 88(%rsp)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq 64(%rsp), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %rbp
# A[1] * A[1]
movq 72(%rsp), %rax
mulq %rax
addq %rbp, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbp
# A[2] * A[2]
movq 80(%rsp), %rax
mulq %rax
addq %rbp, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbp
# A[3] * A[3]
movq 88(%rsp), %rax
mulq %rax
addq %rax, %r14
adcq %rdx, %r15
addq %rbp, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 64(%rsp)
movq %r9, 72(%rsp)
movq %r10, 80(%rsp)
movq %r11, 88(%rsp)
# Add
movq 96(%rsp), %rcx
movq 104(%rsp), %r9
addq (%rsp), %rcx
movq 112(%rsp), %r10
adcq 8(%rsp), %r9
movq 120(%rsp), %r11
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r11, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r11
# Sub modulus (if overflow)
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
movq %rcx, 96(%rsp)
movq %r9, 104(%rsp)
movq %r10, 112(%rsp)
movq %r11, 120(%rsp)
# Multiply
# A[0] * B[0]
movq 32(%rsp), %rax
mulq (%r8)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 40(%rsp), %rax
mulq (%r8)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq 32(%rsp), %rax
mulq 8(%r8)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 48(%rsp), %rax
mulq (%r8)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 40(%rsp), %rax
mulq 8(%r8)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq 32(%rsp), %rax
mulq 16(%r8)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 56(%rsp), %rax
mulq (%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 48(%rsp), %rax
mulq 8(%r8)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 40(%rsp), %rax
mulq 16(%r8)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq 32(%rsp), %rax
mulq 24(%r8)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 56(%rsp), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 48(%rsp), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 40(%rsp), %rax
mulq 24(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 56(%rsp), %rax
mulq 16(%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 48(%rsp), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 56(%rsp), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, 32(%rsp)
movq %r9, 40(%rsp)
movq %r10, 48(%rsp)
movq %r11, 56(%rsp)
# Multiply
# A[0] * B[0]
movq 96(%rsp), %rax
mulq 128(%rsp)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 104(%rsp), %rax
mulq 128(%rsp)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq 96(%rsp), %rax
mulq 136(%rsp)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 112(%rsp), %rax
mulq 128(%rsp)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 104(%rsp), %rax
mulq 136(%rsp)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq 96(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 120(%rsp), %rax
mulq 128(%rsp)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 112(%rsp), %rax
mulq 136(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 104(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq 96(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 120(%rsp), %rax
mulq 136(%rsp)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 112(%rsp), %rax
mulq 144(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 104(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 120(%rsp), %rax
mulq 144(%rsp)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 112(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 120(%rsp), %rax
mulq 152(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
# Store
movq %rcx, (%rsp)
movq %r9, 8(%rsp)
movq %r10, 16(%rsp)
movq %r11, 24(%rsp)
movq 160(%rsp), %r9
decq %r9
jge L_curve25519_x64_bits
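# Compute z^(p-2) = z^(2^255 - 21) by Fermat's little theorem to invert z,
# using the standard addition chain of 254 squarings and 11 multiplies
# (squaring runs of 5, 10, 20, 50, 100, 50 and 5).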
# Invert
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
movq %rsp, %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 128(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 128(%rsp), %rsi
movq $99, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq 168(%rsp), %rdi
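# Multiply x2 by the inverse of z2 (just computed at (%rsp)) to produce
# the affine u-coordinate directly in the caller's output buffer.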
# Multiply
# A[0] * B[0]
movq (%rsp), %rax
mulq (%rdi)
movq %rax, %rcx
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rsp), %rax
mulq (%rdi)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq (%rsp), %rax
mulq 8(%rdi)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 16(%rsp), %rax
mulq (%rdi)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 8(%rsp), %rax
mulq 8(%rdi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq (%rsp), %rax
mulq 16(%rdi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 24(%rsp), %rax
mulq (%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 16(%rsp), %rax
mulq 8(%rdi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 8(%rsp), %rax
mulq 16(%rdi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq (%rsp), %rax
mulq 24(%rdi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 24(%rsp), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 16(%rsp), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 8(%rsp), %rax
mulq 24(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 24(%rsp), %rax
mulq 16(%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 16(%rsp), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 24(%rsp), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
movq $38, %rax
mulq %r15
addq %rax, %r11
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r11, %rdx
imulq $19, %rdx, %rdx
andq %rbp, %r11
movq %rdx, %rbp
movq $38, %rax
mulq %r12
xorq %r12, %r12
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
adcq %rdx, %r14
addq %rbp, %rcx
adcq %r12, %r9
adcq %r13, %r10
adcq %r14, %r11
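# Reduce to the canonical representative in [0, p): first fold bit 255
# with +19, then trial-add 19 and use the sign of the propagated carry to
# decide, in constant time, whether one more reduction by p is needed.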
movq $0x7fffffffffffffff, %rbp
movq %r11, %rax
sarq $63, %rax
andq $19, %rax
andq %rbp, %r11
addq %rax, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
movq $0x7fffffffffffffff, %rax
movq %rcx, %rdx
addq $19, %rdx
movq %r9, %rdx
adcq $0x00, %rdx
movq %r10, %rdx
adcq $0x00, %rdx
movq %r11, %rdx
adcq $0x00, %rdx
sarq $63, %rdx
andq $19, %rdx
andq %rax, %r11
addq %rdx, %rcx
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Store
movq %rcx, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
xorq %rax, %rax
addq $0xb0, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size curve25519_x64,.-curve25519_x64
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
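# fe_sq2_x64: r = 2 * a^2 (mod 2^255 - 19), used by the Ed25519 group
# operations below.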
#ifndef __APPLE__
.text
.globl fe_sq2_x64
.type fe_sq2_x64,@function
.align 16
fe_sq2_x64:
#else
.section __TEXT,__text
.globl _fe_sq2_x64
.p2align 4
_fe_sq2_x64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# Square * 2
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r8
movq %rdx, %r9
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# Double
xorq %r14, %r14
addq %r8, %r8
adcq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq $0x00, %r14
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %r15
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %r15, %r8
adcq %rax, %r9
adcq $0x00, %rdx
movq %rdx, %r15
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %r15, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r15
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %r13
adcq %rdx, %r14
addq %r15, %r12
adcq $0x00, %r13
adcq $0x00, %r14
movq $38, %rax
mulq %r14
addq %rax, %r10
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r15
shldq $0x01, %r10, %rdx
imulq $19, %rdx, %rdx
andq %r15, %r10
movq %rdx, %r15
movq $38, %rax
mulq %r11
xorq %r11, %r11
addq %rax, %rcx
movq $38, %rax
adcq %rdx, %r11
mulq %r12
xorq %r12, %r12
addq %rax, %r8
movq $38, %rax
adcq %rdx, %r12
mulq %r13
xorq %r13, %r13
addq %rax, %r9
adcq %rdx, %r13
addq %r15, %rcx
adcq %r11, %r8
adcq %r12, %r9
adcq %r13, %r10
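# Double the reduced square: shift the limbs left one bit and fold the
# bits pushed above position 254 back in with a multiply by 19.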
movq %r10, %rax
shldq $0x01, %r9, %r10
shldq $0x01, %r8, %r9
shldq $0x01, %rcx, %r8
shlq $0x01, %rcx
movq $0x7fffffffffffffff, %r15
shrq $62, %rax
andq %r15, %r10
imulq $19, %rax, %rax
addq %rax, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
# Store
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_sq2_x64,.-fe_sq2_x64
#endif /* __APPLE__ */
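# fe_pow22523_x64: r = a^(2^252 - 3) = a^((p-5)/8), the exponentiation
# used when recovering square roots for Ed25519 point decompression.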
#ifndef __APPLE__
.text
.globl fe_pow22523_x64
.type fe_pow22523_x64,@function
.align 16
fe_pow22523_x64:
#else
.section __TEXT,__text
.globl _fe_pow22523_x64
.p2align 4
_fe_pow22523_x64:
#endif /* __APPLE__ */
subq $0x70, %rsp
# pow22523
movq %rdi, 96(%rsp)
movq %rsi, 104(%rsp)
movq %rsp, %rdi
movq 104(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq 104(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $99, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_x64@plt
#else
callq _fe_sq_n_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_x64@plt
#else
callq _fe_sq_x64
#endif /* __APPLE__ */
movq 96(%rsp), %rdi
movq %rsp, %rsi
movq 104(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_x64@plt
#else
callq _fe_mul_x64
#endif /* __APPLE__ */
movq 104(%rsp), %rsi
movq 96(%rsp), %rdi
addq $0x70, %rsp
repz retq
#ifndef __APPLE__
.size fe_pow22523_x64,.-fe_pow22523_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p2_x64
.type ge_p1p1_to_p2_x64,@function
.align 16
ge_p1p1_to_p2_x64:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p2_x64
.p2align 4
_ge_p1p1_to_p2_x64:
#endif /* __APPLE__ */
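# Convert a completed (P1P1) point to projective (P2): X3 = X*T,
# Y3 = Y*Z, Z3 = Z*T. The address arithmetic below implies field elements
# at offsets 0 (X), 0x20 (Y), 0x40 (Z) and 0x60 (T).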
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rsi, %rcx
addq $0x60, %rcx
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $0x40, %rsi
addq $0x40, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %rsi, %rcx
subq $32, %rcx
subq $32, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $16, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p1p1_to_p2_x64,.-ge_p1p1_to_p2_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p3_x64
.type ge_p1p1_to_p3_x64,@function
.align 16
ge_p1p1_to_p3_x64:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p3_x64
.p2align 4
_ge_p1p1_to_p3_x64:
#endif /* __APPLE__ */
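# Convert a completed (P1P1) point to extended (P3): X3 = X*T, T3 = X*Y,
# Y3 = Y*Z, Z3 = Z*T, with the same element layout as ge_p1p1_to_p2_x64.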
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rsi, %rcx
addq $0x60, %rcx
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %rsi, %rcx
addq $32, %rcx
addq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $0x40, %rsi
subq $0x40, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %rsi, %rcx
addq $32, %rcx
addq $32, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $16, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p1p1_to_p3_x64,.-ge_p1p1_to_p3_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p2_dbl_x64
.type ge_p2_dbl_x64,@function
.align 16
ge_p2_dbl_x64:
#else
.section __TEXT,__text
.globl _ge_p2_dbl_x64
.p2align 4
_ge_p2_dbl_x64:
#endif /* __APPLE__ */
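# Double a projective (P2) point into a completed (P1P1) result:
# Y3 = X^2 + Y^2, Z3 = Y^2 - X^2, X3 = (X+Y)^2 - Y3, T3 = 2*Z^2 - Z3
# (the a = -1 twisted Edwards doubling formulas).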
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
addq $0x40, %rdi
# Square
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r15, %r15
addq %rax, %r14
adcq %rdx, %r15
# Double
xorq %rbx, %rbx
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq $0x00, %rbx
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %r9
movq %rdx, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %r8, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r8
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %r8, %r12
adcq %rax, %r13
adcq $0x00, %rdx
movq %rdx, %r8
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %r15
adcq %rdx, %rbx
addq %r8, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $32, %rsi
# Square
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r15, %r15
addq %rax, %r14
adcq %rdx, %r15
# Double
xorq %rbx, %rbx
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq $0x00, %rbx
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %r9
movq %rdx, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %r8, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r8
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %r8, %r12
adcq %rax, %r13
adcq $0x00, %rdx
movq %rdx, %r8
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %r15
adcq %rdx, %rbx
addq %r8, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %rdi, %rsi
subq $32, %rdi
# Add-Sub
# Add
movq %r9, %r13
addq (%rsi), %r9
movq %r10, %r14
adcq 8(%rsi), %r10
movq %r11, %r15
adcq 16(%rsi), %r11
movq %r12, %rbx
adcq 24(%rsi), %r12
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r12, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r12
# Sub modulus (if overflow)
addq %rax, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Sub
subq (%rsi), %r13
sbbq 8(%rsi), %r14
sbbq 16(%rsi), %r15
sbbq 24(%rsi), %rbx
sbbq %rax, %rax
shldq $0x01, %rbx, %rax
imulq $-19, %rax
andq %rdx, %rbx
# Add modulus (if underflow)
subq %rax, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %r13, (%rsi)
movq %r14, 8(%rsi)
movq %r15, 16(%rsi)
movq %rbx, 24(%rsi)
movq 8(%rsp), %rcx
movq %rcx, %rsi
addq $32, %rsi
subq $32, %rdi
# Add
movq (%rsi), %r9
movq 8(%rsi), %r10
addq (%rcx), %r9
movq 16(%rsi), %r11
adcq 8(%rcx), %r10
movq 24(%rsi), %r12
adcq 16(%rcx), %r11
adcq 24(%rcx), %r12
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r12, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r12
# Sub modulus (if overflow)
addq %rax, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
# Square
# A[0] * A[1]
movq (%rdi), %rax
mulq 8(%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * A[2]
movq (%rdi), %rax
mulq 16(%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[0] * A[3]
movq (%rdi), %rax
mulq 24(%rdi)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# A[1] * A[2]
movq 8(%rdi), %rax
mulq 16(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * A[3]
movq 8(%rdi), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
# A[2] * A[3]
movq 16(%rdi), %rax
mulq 24(%rdi)
xorq %r15, %r15
addq %rax, %r14
adcq %rdx, %r15
# Double
xorq %rbx, %rbx
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq $0x00, %rbx
# A[0] * A[0]
movq (%rdi), %rax
mulq %rax
movq %rax, %r9
movq %rdx, %r8
# A[1] * A[1]
movq 8(%rdi), %rax
mulq %rax
addq %r8, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r8
# A[2] * A[2]
movq 16(%rdi), %rax
mulq %rax
addq %r8, %r12
adcq %rax, %r13
adcq $0x00, %rdx
movq %rdx, %r8
# A[3] * A[3]
movq 24(%rdi), %rax
mulq %rax
addq %rax, %r15
adcq %rdx, %rbx
addq %r8, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
# Store
movq %rdi, %rsi
addq $32, %rsi
# Sub
subq (%rsi), %r9
sbbq 8(%rsi), %r10
sbbq 16(%rsi), %r11
sbbq 24(%rsi), %r12
sbbq %rax, %rax
shldq $0x01, %r12, %rax
movq $0x7fffffffffffffff, %rdx
imulq $-19, %rax
andq %rdx, %r12
# Add modulus (if underflow)
subq %rax, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $0x40, %rcx
# Square * 2
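# Computes 2 * a^2 mod p; the doubling is fused into the reduction as
# the extra shift-left-by-one chain that follows the fold by 38.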
# A[0] * A[1]
movq (%rcx), %rax
mulq 8(%rcx)
movq %rax, %r10
movq %rdx, %r11
# A[0] * A[2]
movq (%rcx), %rax
mulq 16(%rcx)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[0] * A[3]
movq (%rcx), %rax
mulq 24(%rcx)
xorq %r13, %r13
addq %rax, %r12
adcq %rdx, %r13
# A[1] * A[2]
movq 8(%rcx), %rax
mulq 16(%rcx)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * A[3]
movq 8(%rcx), %rax
mulq 24(%rcx)
addq %rax, %r13
adcq %rdx, %r14
# A[2] * A[3]
movq 16(%rcx), %rax
mulq 24(%rcx)
xorq %r15, %r15
addq %rax, %r14
adcq %rdx, %r15
# Double
xorq %rbx, %rbx
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq $0x00, %rbx
# A[0] * A[0]
movq (%rcx), %rax
mulq %rax
movq %rax, %r9
movq %rdx, %r8
# A[1] * A[1]
movq 8(%rcx), %rax
mulq %rax
addq %r8, %r10
adcq %rax, %r11
adcq $0x00, %rdx
movq %rdx, %r8
# A[2] * A[2]
movq 16(%rcx), %rax
mulq %rax
addq %r8, %r12
adcq %rax, %r13
adcq $0x00, %rdx
movq %rdx, %r8
# A[3] * A[3]
movq 24(%rcx), %rax
mulq %rax
addq %rax, %r15
adcq %rdx, %rbx
addq %r8, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
movq $38, %rax
mulq %rbx
addq %rax, %r12
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rdx
imulq $19, %rdx, %rdx
andq %r8, %r12
movq %rdx, %r8
movq $38, %rax
mulq %r13
xorq %r13, %r13
addq %rax, %r9
movq $38, %rax
adcq %rdx, %r13
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
adcq %rdx, %r15
addq %r8, %r9
adcq %r13, %r10
adcq %r14, %r11
adcq %r15, %r12
movq %r12, %rax
shldq $0x01, %r11, %r12
shldq $0x01, %r10, %r11
shldq $0x01, %r9, %r10
shlq $0x01, %r9
movq $0x7fffffffffffffff, %r8
shrq $62, %rax
andq %r8, %r12
imulq $19, %rax, %rax
addq %rax, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Store
movq %rdi, %rsi
addq $0x40, %rsi
addq $0x60, %rdi
# Sub
subq (%rsi), %r9
sbbq 8(%rsi), %r10
sbbq 16(%rsi), %r11
sbbq 24(%rsi), %r12
sbbq %rax, %rax
shldq $0x01, %r12, %rax
movq $0x7fffffffffffffff, %rdx
imulq $-19, %rax
andq %rdx, %r12
# Add modulus (if underflow)
subq %rax, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $16, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p2_dbl_x64,.-ge_p2_dbl_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_madd_x64
.type ge_madd_x64,@function
.align 16
ge_madd_x64:
#else
.section __TEXT,__text
.globl _ge_madd_x64
.p2align 4
_ge_madd_x64:
#endif /* __APPLE__ */
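# ge_madd_x64(r, p, q): mixed point addition for Ed25519. The layout
# appears to follow the ref10 scheme: rsi holds p with coordinates at
# 32-byte offsets, rdx (saved in rcx) a precomputed point
# (y+x, y-x, 2dxy), and rdi receives the result. Callee-saved
# registers are preserved per the SysV AMD64 ABI.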
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rcx
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rcx, 16(%rsp)
movq %rsi, %r8
movq %rsi, %rcx
addq $32, %rcx
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq (%rcx), %r10
movq 8(%rcx), %r11
movq 16(%rcx), %r12
movq 24(%rcx), %r13
movq %r10, %r14
addq (%r8), %r10
movq %r11, %r15
adcq 8(%r8), %r11
movq %r12, %rbx
adcq 16(%r8), %r12
movq %r13, %rbp
adcq 24(%r8), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%r8), %r14
sbbq 8(%r8), %r15
sbbq 16(%r8), %rbx
sbbq 24(%r8), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rcx
addq $32, %rcx
# Multiply
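# Schoolbook 4x4-word multiply: partial products A[i]*B[j] are
# accumulated in order of increasing weight i+j; each xorq both
# clears the next result word and resets the carry flag for a fresh
# chain.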
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
addq $0x60, %r8
addq $32, %rcx
addq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
subq $0x40, %rcx
subq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
subq $32, %r8
# Double
movq (%r8), %r10
movq 8(%r8), %r11
addq %r10, %r10
movq 16(%r8), %r12
adcq %r11, %r11
movq 24(%r8), %r13
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdi, %rsi
addq $0x60, %rsi
addq $0x40, %rdi
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_madd_x64,.-ge_madd_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_msub_x64
.type ge_msub_x64,@function
.align 16
ge_msub_x64:
#else
.section __TEXT,__text
.globl _ge_msub_x64
.p2align 4
_ge_msub_x64:
#endif /* __APPLE__ */
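# ge_msub_x64(r, p, q): mixed point subtraction. Structurally the
# same as ge_madd_x64 above, apparently differing only in which
# precomputed entries ((y+x) versus (y-x)) multiply the two add/sub
# halves, since subtraction swaps their roles in the ref10 formulas.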
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rcx
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rcx, 16(%rsp)
movq %rsi, %r8
movq %rsi, %rcx
addq $32, %rcx
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq (%rcx), %r10
movq 8(%rcx), %r11
movq 16(%rcx), %r12
movq 24(%rcx), %r13
movq %r10, %r14
addq (%r8), %r10
movq %r11, %r15
adcq 8(%r8), %r11
movq %r12, %rbx
adcq 16(%r8), %r12
movq %r13, %rbp
adcq 24(%r8), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%r8), %r14
sbbq 8(%r8), %r15
sbbq 16(%r8), %rbx
sbbq 24(%r8), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rcx
addq $32, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
addq $0x60, %r8
addq $0x40, %rcx
addq $0x40, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
subq $32, %rcx
subq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
subq $32, %r8
addq $0x40, %rdi
# Double
movq (%r8), %r10
movq 8(%r8), %r11
addq %r10, %r10
movq 16(%r8), %r12
adcq %r11, %r11
movq 24(%r8), %r13
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_msub_x64,.-ge_msub_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_add_x64
.type ge_add_x64,@function
.align 16
ge_add_x64:
#else
.section __TEXT,__text
.globl _ge_add_x64
.p2align 4
_ge_add_x64:
#endif /* __APPLE__ */
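# ge_add_x64(r, p, q): full point addition. Same skeleton as
# ge_madd_x64 but with one extra field multiply, consistent with q
# being a cached point that carries its own Z coordinate.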
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rcx
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rcx, 16(%rsp)
movq %rsi, %r8
movq %rsi, %rcx
addq $32, %rcx
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq (%rcx), %r10
movq 8(%rcx), %r11
movq 16(%rcx), %r12
movq 24(%rcx), %r13
movq %r10, %r14
addq (%r8), %r10
movq %r11, %r15
adcq 8(%r8), %r11
movq %r12, %rbx
adcq 16(%r8), %r12
movq %r13, %rbp
adcq 24(%r8), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%r8), %r14
sbbq 8(%r8), %r15
sbbq 16(%r8), %rbx
sbbq 24(%r8), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rcx
addq $32, %rcx
addq $32, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
addq $0x60, %r8
addq $0x40, %rcx
addq $0x40, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
subq $0x60, %rcx
subq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
subq $32, %r8
addq $0x40, %rcx
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
addq $0x40, %rdi
# Double
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_add_x64,.-ge_add_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_sub_x64
.type ge_sub_x64,@function
.align 16
ge_sub_x64:
#else
.section __TEXT,__text
.globl _ge_sub_x64
.p2align 4
_ge_sub_x64:
#endif /* __APPLE__ */
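# ge_sub_x64(r, p, q): full point subtraction; mirrors ge_add_x64
# with the (y+x)/(y-x) multiplications exchanged, analogous to the
# madd/msub pair above.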
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rcx
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rcx, 16(%rsp)
movq %rsi, %r8
movq %rsi, %rcx
addq $32, %rcx
movq %rdi, %rsi
addq $32, %rsi
# Add-Sub
# Add
movq (%rcx), %r10
movq 8(%rcx), %r11
movq 16(%rcx), %r12
movq 24(%rcx), %r13
movq %r10, %r14
addq (%r8), %r10
movq %r11, %r15
adcq 8(%r8), %r11
movq %r12, %rbx
adcq 16(%r8), %r12
movq %r13, %rbp
adcq 24(%r8), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%r8), %r14
sbbq 8(%r8), %r15
sbbq 16(%r8), %rbx
sbbq 24(%r8), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rcx
addq $32, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
addq $0x60, %r8
addq $0x60, %rcx
addq $0x40, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
subq $0x40, %rcx
subq $0x60, %rdi
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rdi)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rdi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rdi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rdi)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rdi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rdi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rdi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rdi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rdi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rdi)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rdi)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rdi)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
subq $32, %r8
addq $32, %rcx
# Multiply
# A[0] * B[0]
movq (%rcx), %rax
mulq (%r8)
movq %rax, %r10
movq %rdx, %r11
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%r8)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%r8)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%r8)
addq %rax, %r12
adcq %rdx, %r13
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%r8)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%r8)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%r8)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%r8)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%r8)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%r8)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%r8)
xorq %rbp, %rbp
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%r8)
addq %rax, %r15
adcq %rdx, %rbx
adcq $0x00, %rbp
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%r8)
addq %rax, %rbx
adcq %rdx, %rbp
movq $38, %rax
mulq %rbp
addq %rax, %r13
adcq $0x00, %rdx
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %rdx
imulq $19, %rdx, %rdx
andq %r9, %r13
movq %rdx, %r9
movq $38, %rax
mulq %r14
xorq %r14, %r14
addq %rax, %r10
movq $38, %rax
adcq %rdx, %r14
mulq %r15
xorq %r15, %r15
addq %rax, %r11
movq $38, %rax
adcq %rdx, %r15
mulq %rbx
xorq %rbx, %rbx
addq %rax, %r12
adcq %rdx, %rbx
addq %r9, %r10
adcq %r14, %r11
adcq %r15, %r12
adcq %rbx, %r13
# Store
# Double
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdi, %rsi
addq $0x40, %rsi
addq $0x60, %rdi
# Add-Sub
# Add
movq %r10, %r14
addq (%rdi), %r10
movq %r11, %r15
adcq 8(%rdi), %r11
movq %r12, %rbx
adcq 16(%rdi), %r12
movq %r13, %rbp
adcq 24(%rdi), %r13
movq $0x00, %rax
adcq $0x00, %rax
shldq $0x01, %r13, %rax
movq $0x7fffffffffffffff, %rdx
imulq $19, %rax
andq %rdx, %r13
# Sub modulus (if overflow)
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rdi), %r14
sbbq 8(%rdi), %r15
sbbq 16(%rdi), %rbx
sbbq 24(%rdi), %rbp
sbbq %rax, %rax
shldq $0x01, %rbp, %rax
imulq $-19, %rax
andq %rdx, %rbp
# Add modulus (if underflow)
subq %rax, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_sub_x64,.-ge_sub_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl sc_reduce_x64
.type sc_reduce_x64,@function
.align 16
sc_reduce_x64:
#else
.section __TEXT,__text
.globl _sc_reduce_x64
.p2align 4
_sc_reduce_x64:
#endif /* __APPLE__ */
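# sc_reduce_x64(a): reduces a 512-bit value in place modulo the
# Ed25519 group order L = 2^252 + 0x14def9dea2f79cd65812631a5cf5d3ed.
# The constants 0xa7ed9ce5a30a2c13 and 0xeb2106215d086329 together
# form -(L - 2^252) mod 2^128, so multiply-accumulating the top words
# by them folds those words down using 2^252 = -(L - 2^252) (mod L).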
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
movq %r15, %rcx
movq $0xfffffffffffffff, %rsi
shrq $56, %rcx
shldq $4, %r14, %r15
shldq $4, %r13, %r14
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rsi, %r11
andq %rsi, %r15
# Add order times bits 504..511
subq %rcx, %r14
sbbq $0x00, %r15
movq $0xeb2106215d086329, %rax
mulq %rcx
movq $0x00, %rsi
addq %rax, %r13
movq $0xa7ed9ce5a30a2c13, %rax
adcq %rdx, %rsi
mulq %rcx
addq %rax, %r12
adcq %rdx, %r13
adcq %rsi, %r14
adcq $0x00, %r15
# Sub product of top 4 words and order
movq $0xa7ed9ce5a30a2c13, %rcx
movq %r12, %rax
mulq %rcx
movq $0x00, %rbp
addq %rax, %r8
adcq %rdx, %rbp
movq %r13, %rax
mulq %rcx
movq $0x00, %rsi
addq %rax, %r9
adcq %rdx, %rsi
movq %r14, %rax
mulq %rcx
addq %rbp, %r9
adcq %rax, %r10
adcq %rdx, %r11
movq $0x00, %rbx
adcq $0x00, %rbx
movq %r15, %rax
mulq %rcx
addq %rsi, %r10
adcq %rax, %r11
adcq %rdx, %rbx
movq $0xeb2106215d086329, %rcx
movq %r12, %rax
mulq %rcx
movq $0x00, %rbp
addq %rax, %r9
adcq %rdx, %rbp
movq %r13, %rax
mulq %rcx
movq $0x00, %rsi
addq %rax, %r10
adcq %rdx, %rsi
movq %r14, %rax
mulq %rcx
addq %rbp, %r10
adcq %rax, %r11
adcq %rdx, %rbx
movq $0x00, %rbp
adcq $0x00, %rbp
movq %r15, %rax
mulq %rcx
addq %rsi, %r11
adcq %rax, %rbx
adcq %rdx, %rbp
subq %r12, %r10
movq %rbx, %r12
sbbq %r13, %r11
movq %rbp, %r13
sbbq %r14, %r12
sbbq %r15, %r13
movq %r13, %rcx
sarq $57, %rcx
# Conditionally subtract order starting at bit 125
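# (the arithmetic shift above yields an all-zero or all-one mask, so
# the ANDs below select these words branchlessly and the reduction
# stays constant-time)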
movq $0xa000000000000000, %rax
movq $0xcb024c634b9eba7d, %rdx
movq $0x29bdf3bd45ef39a, %rbx
movq $0x200000000000000, %rbp
andq %rcx, %rax
andq %rcx, %rdx
andq %rcx, %rbx
andq %rcx, %rbp
addq %rax, %r9
adcq %rdx, %r10
adcq %rbx, %r11
adcq $0x00, %r12
adcq %rbp, %r13
# Move bits 252..376 into their own registers
movq $0xfffffffffffffff, %rcx
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rcx, %r11
# Sub product of top 2 words and order
# * -5812631a5cf5d3ed
movq $0xa7ed9ce5a30a2c13, %rcx
movq %r12, %rax
mulq %rcx
movq $0x00, %rbx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rbx
movq %r13, %rax
mulq %rcx
addq %rax, %r9
adcq %rdx, %rbx
# * -14def9dea2f79cd7
movq $0xeb2106215d086329, %rcx
movq %r12, %rax
mulq %rcx
movq $0x00, %rbp
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %rbp
movq %r13, %rax
mulq %rcx
addq %rax, %r10
adcq %rdx, %rbp
# Add the overflow words in at word 2 (bit 128)
movq $0xfffffffffffffff, %rsi
andq %rsi, %r11
addq %rbx, %r10
adcq %rbp, %r11
# Subtract the top two words at word 2 (bit 128)
subq %r12, %r10
sbbq %r13, %r11
sbbq %rsi, %rsi
# Conditional sub order
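# (when the subtraction above borrowed, the mask is all ones and the
# order's words are added back to bring the value into range)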
movq $0x5812631a5cf5d3ed, %rax
movq $0x14def9dea2f79cd6, %rdx
movq $0x1000000000000000, %rbx
andq %rsi, %rax
andq %rsi, %rdx
andq %rsi, %rbx
addq %rax, %r8
movq $0xfffffffffffffff, %rax
adcq %rdx, %r9
adcq $0x00, %r10
adcq %rbx, %r11
andq %rax, %r11
# Store result
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sc_reduce_x64,.-sc_reduce_x64
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl sc_muladd_x64
.type sc_muladd_x64,@function
.align 16
sc_muladd_x64:
#else
.section __TEXT,__text
.globl _sc_muladd_x64
.p2align 4
_sc_muladd_x64:
#endif /* __APPLE__ */
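# sc_muladd_x64(s, a, b, c): computes (a * b + c) mod L, as used for
# the S half of an Ed25519 signature; a 4x4-word multiply feeds the
# same order reduction as sc_reduce_x64.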
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rbp
# Multiply
# A[0] * B[0]
movq (%rbp), %rax
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rbp), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r9
adcq %rdx, %r10
# A[1] * B[0]
movq (%rbp), %rax
mulq 8(%rsi)
xorq %r11, %r11
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r11
# A[0] * B[2]
movq 16(%rbp), %rax
mulq (%rsi)
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[1]
movq 8(%rbp), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq (%rbp), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[3]
movq 24(%rbp), %rax
mulq (%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 16(%rbp), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 8(%rbp), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq (%rbp), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[3]
movq 24(%rbp), %rax
mulq 8(%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[2]
movq 16(%rbp), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[1]
movq 8(%rbp), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[3]
movq 24(%rbp), %rax
mulq 16(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[2]
movq 16(%rbp), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[3]
movq 24(%rbp), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
# Add c to a * b
addq (%rcx), %r8
adcq 8(%rcx), %r9
adcq 16(%rcx), %r10
adcq 24(%rcx), %r11
adcq $0x00, %r12
adcq $0x00, %r13
adcq $0x00, %r14
adcq $0x00, %r15
movq %r15, %rbx
movq $0xfffffffffffffff, %rcx
shrq $56, %rbx
shldq $4, %r14, %r15
shldq $4, %r13, %r14
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rcx, %r11
andq %rcx, %r15
# Add order times bits 504..507
subq %rbx, %r14
sbbq $0x00, %r15
movq $0xeb2106215d086329, %rax
mulq %rbx
movq $0x00, %rcx
addq %rax, %r13
movq $0xa7ed9ce5a30a2c13, %rax
adcq %rdx, %rcx
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
adcq %rcx, %r14
adcq $0x00, %r15
# Sub product of top 4 words and order
movq $0xa7ed9ce5a30a2c13, %rbx
movq %r12, %rax
mulq %rbx
movq $0x00, %rbp
addq %rax, %r8
adcq %rdx, %rbp
movq %r13, %rax
mulq %rbx
movq $0x00, %rcx
addq %rax, %r9
adcq %rdx, %rcx
movq %r14, %rax
mulq %rbx
addq %rbp, %r9
adcq %rax, %r10
adcq %rdx, %r11
movq $0x00, %rsi
adcq $0x00, %rsi
movq %r15, %rax
mulq %rbx
addq %rcx, %r10
adcq %rax, %r11
adcq %rdx, %rsi
movq $0xeb2106215d086329, %rbx
movq %r12, %rax
mulq %rbx
movq $0x00, %rbp
addq %rax, %r9
adcq %rdx, %rbp
movq %r13, %rax
mulq %rbx
movq $0x00, %rcx
addq %rax, %r10
adcq %rdx, %rcx
movq %r14, %rax
mulq %rbx
addq %rbp, %r10
adcq %rax, %r11
adcq %rdx, %rsi
movq $0x00, %rbp
adcq $0x00, %rbp
movq %r15, %rax
mulq %rbx
addq %rcx, %r11
adcq %rax, %rsi
adcq %rdx, %rbp
subq %r12, %r10
movq %rsi, %r12
sbbq %r13, %r11
movq %rbp, %r13
sbbq %r14, %r12
sbbq %r15, %r13
movq %r13, %rbx
sarq $57, %rbx
# Conditionally subtract order starting at bit 125
movq $0xa000000000000000, %rax
movq $0xcb024c634b9eba7d, %rdx
movq $0x29bdf3bd45ef39a, %rsi
movq $0x200000000000000, %rbp
andq %rbx, %rax
andq %rbx, %rdx
andq %rbx, %rsi
andq %rbx, %rbp
addq %rax, %r9
adcq %rdx, %r10
adcq %rsi, %r11
adcq $0x00, %r12
adcq %rbp, %r13
# Move bits 252..376 into their own registers
movq $0xfffffffffffffff, %rbx
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rbx, %r11
# Sub product of top 2 words and order
# * -5812631a5cf5d3ed
movq $0xa7ed9ce5a30a2c13, %rbx
movq %r12, %rax
mulq %rbx
movq $0x00, %rsi
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rsi
movq %r13, %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %rsi
# * -14def9dea2f79cd7
movq $0xeb2106215d086329, %rbx
movq %r12, %rax
mulq %rbx
movq $0x00, %rbp
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %rbp
movq %r13, %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %rbp
# Add the overflow words in at word 2 (bit 128)
movq $0xfffffffffffffff, %rcx
andq %rcx, %r11
addq %rsi, %r10
adcq %rbp, %r11
# Subtract the top two words at word 2 (bit 128)
subq %r12, %r10
sbbq %r13, %r11
sbbq %rcx, %rcx
# Conditional sub order
movq $0x5812631a5cf5d3ed, %rax
movq $0x14def9dea2f79cd6, %rdx
movq $0x1000000000000000, %rsi
andq %rcx, %rax
andq %rcx, %rdx
andq %rcx, %rsi
addq %rax, %r8
movq $0xfffffffffffffff, %rax
adcq %rdx, %r9
adcq $0x00, %r10
adcq %rsi, %r11
andq %rax, %r11
# Store result
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sc_muladd_x64,.-sc_muladd_x64
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.text
.globl fe_mul_avx2
.type fe_mul_avx2,@function
.align 16
fe_mul_avx2:
#else
.section __TEXT,__text
.globl _fe_mul_avx2
.p2align 4
_fe_mul_avx2:
#endif /* __APPLE__ */
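# fe_mul_avx2(r, a, b): field multiply on the HAVE_INTEL_AVX2 path.
# Despite the suffix, the body relies on BMI2 (mulx) and ADX
# (adcx/adox); the latter keep two independent carry chains (CF and
# OF) live so the partial products can be interleaved.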
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rbx
# Multiply
# A[0] * B[0]
movq (%rbx), %rdx
mulxq (%rsi), %r8, %r9
# A[2] * B[0]
mulxq 16(%rsi), %r10, %r11
# A[1] * B[0]
mulxq 8(%rsi), %rax, %rcx
xorq %r15, %r15
adcxq %rax, %r9
# A[3] * B[1]
movq 8(%rbx), %rdx
mulxq 24(%rsi), %r12, %r13
adcxq %rcx, %r10
# A[0] * B[1]
mulxq (%rsi), %rax, %rcx
adoxq %rax, %r9
# A[2] * B[1]
mulxq 16(%rsi), %rax, %r14
adoxq %rcx, %r10
adcxq %rax, %r11
# A[1] * B[2]
movq 16(%rbx), %rdx
mulxq 8(%rsi), %rax, %rcx
adcxq %r14, %r12
adoxq %rax, %r11
adcxq %r15, %r13
adoxq %rcx, %r12
# A[0] * B[2]
mulxq (%rsi), %rax, %rcx
adoxq %r15, %r13
xorq %r14, %r14
adcxq %rax, %r10
# A[1] * B[1]
movq 8(%rbx), %rdx
mulxq 8(%rsi), %rdx, %rax
adcxq %rcx, %r11
adoxq %rdx, %r10
# A[1] * B[3]
movq 24(%rbx), %rdx
adoxq %rax, %r11
mulxq 8(%rsi), %rax, %rcx
adcxq %rax, %r12
# A[2] * B[2]
movq 16(%rbx), %rdx
mulxq 16(%rsi), %rdx, %rax
adcxq %rcx, %r13
adoxq %rdx, %r12
# A[3] * B[3]
movq 24(%rbx), %rdx
adoxq %rax, %r13
mulxq 24(%rsi), %rax, %rcx
adoxq %r15, %r14
adcxq %rax, %r14
# A[0] * B[3]
mulxq (%rsi), %rdx, %rax
adcxq %rcx, %r15
xorq %rcx, %rcx
adcxq %rdx, %r11
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rax, %r12
mulxq (%rbx), %rdx, %rax
adoxq %rdx, %r11
adoxq %rax, %r12
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rbx), %rdx, %rax
adcxq %rdx, %r13
# A[2] * B[3]
movq 24(%rbx), %rdx
adcxq %rax, %r14
mulxq 16(%rsi), %rax, %rdx
adcxq %rcx, %r15
adoxq %rax, %r13
adoxq %rdx, %r14
adoxq %rcx, %r15
movq $38, %rdx
mulxq %r15, %r15, %rax
addq %r15, %r11
adcq $0x00, %rax
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r11, %rax
imulq $19, %rax, %rax
andq %rcx, %r11
xorq %rcx, %rcx
adoxq %rax, %r8
mulxq %r12, %rax, %r12
adcxq %rax, %r8
adoxq %r12, %r9
mulxq %r13, %rax, %r13
adcxq %rax, %r9
adoxq %r13, %r10
mulxq %r14, %rax, %r14
adcxq %rax, %r10
adoxq %r14, %r11
adcxq %rcx, %r11
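# Fold bit 255: 2^255 = 19 (mod p), so clear the top bit and add 19
# when it was set (sar gives a 0/-1 mask, AND 19 selects the value).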
movq $0x7fffffffffffffff, %rcx
movq %r11, %rdx
sarq $63, %rdx
andq $19, %rdx
andq %rcx, %r11
addq %rdx, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_mul_avx2,.-fe_mul_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_sq_avx2
.type fe_sq_avx2,@function
.align 16
fe_sq_avx2:
#else
.section __TEXT,__text
.globl _fe_sq_avx2
.p2align 4
_fe_sq_avx2:
#endif /* __APPLE__ */
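# fe_sq_avx2: square the field element at %rsi into %rdi (mod 2^255-19).
# Off-diagonal products are computed once and doubled, then the same
# 38/19 reduction as in fe_mul_avx2 is applied.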
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# Square
movq (%rsi), %rdx
movq 8(%rsi), %rax
# A[0] * A[1]
movq %rdx, %r15
mulxq %rax, %r9, %r10
# A[0] * A[3]
mulxq 24(%rsi), %r11, %r12
# A[2] * A[1]
movq 16(%rsi), %rdx
mulxq %rax, %rcx, %rbx
xorq %r8, %r8
adoxq %rcx, %r11
# A[2] * A[3]
mulxq 24(%rsi), %r13, %r14
adoxq %rbx, %r12
# A[2] * A[0]
mulxq %r15, %rcx, %rbx
adoxq %r8, %r13
adcxq %rcx, %r10
adoxq %r8, %r14
# A[1] * A[3]
movq %rax, %rdx
mulxq 24(%rsi), %rcx, %rdx
adcxq %rbx, %r11
adcxq %rcx, %r12
adcxq %rdx, %r13
adcxq %r8, %r14
# A[0] * A[0]
movq %r15, %rdx
mulxq %rdx, %r8, %rcx
xorq %r15, %r15
adcxq %r9, %r9
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r9
mulxq %rdx, %rcx, %rbx
adcxq %r10, %r10
adoxq %rcx, %r10
adcxq %r11, %r11
# A[2] * A[2]
movq 16(%rsi), %rdx
adoxq %rbx, %r11
mulxq %rdx, %rbx, %rcx
adcxq %r12, %r12
adoxq %rbx, %r12
adcxq %r13, %r13
# A[3] * A[3]
movq 24(%rsi), %rdx
adoxq %rcx, %r13
mulxq %rdx, %rcx, %rbx
adcxq %r14, %r14
adoxq %rcx, %r14
adcxq %r15, %r15
adoxq %rbx, %r15
movq $38, %rdx
mulxq %r15, %r15, %rbx
addq %r15, %r11
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r11, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r11
xorq %rcx, %rcx
adoxq %rbx, %r8
mulxq %r12, %rbx, %r12
adcxq %rbx, %r8
adoxq %r12, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
adcxq %rcx, %r11
movq $0x7fffffffffffffff, %rcx
movq %r11, %rdx
sarq $63, %rdx
andq $19, %rdx
andq %rcx, %r11
addq %rdx, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size fe_sq_avx2,.-fe_sq_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_sq_n_avx2
.type fe_sq_n_avx2,@function
.align 16
fe_sq_n_avx2:
#else
.section __TEXT,__text
.globl _fe_sq_n_avx2
.p2align 4
_fe_sq_n_avx2:
#endif /* __APPLE__ */
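# fe_sq_n_avx2: square a field element %rdx times (%rdi = out, %rsi = in;
# the count is kept in %rbp and decremented as a byte). Callers in this
# file always pass out == in, so each pass squares the previous result.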
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %rbp
L_fe_sq_n_avx2:
# Square
movq (%rsi), %rdx
movq 8(%rsi), %rax
# A[0] * A[1]
movq %rdx, %r15
mulxq %rax, %r9, %r10
# A[0] * A[3]
mulxq 24(%rsi), %r11, %r12
# A[2] * A[1]
movq 16(%rsi), %rdx
mulxq %rax, %rcx, %rbx
xorq %r8, %r8
adoxq %rcx, %r11
# A[2] * A[3]
mulxq 24(%rsi), %r13, %r14
adoxq %rbx, %r12
# A[2] * A[0]
mulxq %r15, %rcx, %rbx
adoxq %r8, %r13
adcxq %rcx, %r10
adoxq %r8, %r14
# A[1] * A[3]
movq %rax, %rdx
mulxq 24(%rsi), %rcx, %rdx
adcxq %rbx, %r11
adcxq %rcx, %r12
adcxq %rdx, %r13
adcxq %r8, %r14
# A[0] * A[0]
movq %r15, %rdx
mulxq %rdx, %r8, %rcx
xorq %r15, %r15
adcxq %r9, %r9
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r9
mulxq %rdx, %rcx, %rbx
adcxq %r10, %r10
adoxq %rcx, %r10
adcxq %r11, %r11
# A[2] * A[2]
movq 16(%rsi), %rdx
adoxq %rbx, %r11
mulxq %rdx, %rbx, %rcx
adcxq %r12, %r12
adoxq %rbx, %r12
adcxq %r13, %r13
# A[3] * A[3]
movq 24(%rsi), %rdx
adoxq %rcx, %r13
mulxq %rdx, %rcx, %rbx
adcxq %r14, %r14
adoxq %rcx, %r14
adcxq %r15, %r15
adoxq %rbx, %r15
movq $38, %rdx
mulxq %r15, %r15, %rbx
addq %r15, %r11
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r11, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r11
xorq %rcx, %rcx
adoxq %rbx, %r8
mulxq %r12, %rbx, %r12
adcxq %rbx, %r8
adoxq %r12, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
adcxq %rcx, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
decb %bpl
jnz L_fe_sq_n_avx2
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size fe_sq_n_avx2,.-fe_sq_n_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_mul121666_avx2
.type fe_mul121666_avx2,@function
.align 16
fe_mul121666_avx2:
#else
.section __TEXT,__text
.globl _fe_mul121666_avx2
.p2align 4
_fe_mul121666_avx2:
#endif /* __APPLE__ */
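# fe_mul121666_avx2: multiply a field element by 121666 (0x1db42), the
# (486662+2)/4 constant from the Curve25519 Montgomery ladder, and reduce.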
pushq %r12
pushq %r13
movq $0x1db42, %rdx
mulxq (%rsi), %rax, %r13
mulxq 8(%rsi), %rcx, %r12
mulxq 16(%rsi), %r8, %r11
mulxq 24(%rsi), %r9, %r10
addq %r13, %rcx
adcq %r12, %r8
adcq %r11, %r9
adcq $0x00, %r10
movq $0x7fffffffffffffff, %r13
shldq $0x01, %r9, %r10
andq %r13, %r9
imulq $19, %r10, %r10
addq %r10, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size fe_mul121666_avx2,.-fe_mul121666_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_invert_avx2
.type fe_invert_avx2,@function
.align 16
fe_invert_avx2:
#else
.section __TEXT,__text
.globl _fe_invert_avx2
.p2align 4
_fe_invert_avx2:
#endif /* __APPLE__ */
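# fe_invert_avx2: compute 1/a = a^(p-2) mod p, p = 2^255-19 (Fermat's
# little theorem), via the standard Curve25519 addition chain built from
# fe_sq_avx2, fe_sq_n_avx2 and fe_mul_avx2. Temporaries live on the stack;
# the original %rdi/%rsi are saved at 128/136(%rsp) across the calls.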
subq $0x90, %rsp
# Invert
movq %rdi, 128(%rsp)
movq %rsi, 136(%rsp)
movq %rsp, %rdi
movq 136(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq 136(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $0x63, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
movq 128(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq 136(%rsp), %rsi
movq 128(%rsp), %rdi
addq $0x90, %rsp
repz retq
#ifndef __APPLE__
.size fe_invert_avx2,.-fe_invert_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl curve25519_avx2
.type curve25519_avx2,@function
.align 16
curve25519_avx2:
#else
.section __TEXT,__text
.globl _curve25519_avx2
.p2align 4
_curve25519_avx2:
#endif /* __APPLE__ */
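# curve25519_avx2: X25519 scalar multiplication. %rdi = output x-coordinate
# (also used as a ladder register), %rsi = 32-byte scalar, %rdx = base
# point x-coordinate (saved in %r8). A constant-time Montgomery ladder is
# followed by a field inversion and a final canonical reduction; the
# function returns 0 in %rax.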
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %r8
subq $0xb8, %rsp
movq $0x00, 176(%rsp)
movq %rdi, 168(%rsp)
# Set one
movq $0x01, (%rdi)
movq $0x00, 8(%rdi)
movq $0x00, 16(%rdi)
movq $0x00, 24(%rdi)
# Set zero
movq $0x00, (%rsp)
movq $0x00, 8(%rsp)
movq $0x00, 16(%rsp)
movq $0x00, 24(%rsp)
# Set one
movq $0x01, 32(%rsp)
movq $0x00, 40(%rsp)
movq $0x00, 48(%rsp)
movq $0x00, 56(%rsp)
# Copy
movq (%r8), %r9
movq 8(%r8), %r10
movq 16(%r8), %r11
movq 24(%r8), %r12
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq %r11, 80(%rsp)
movq %r12, 88(%rsp)
movq $0xfe, %rbx
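# Montgomery ladder over scalar bits 254 down to 0. 176(%rsp) holds the
# previously processed bit, so each iteration swaps the working points
# only when the bit changes.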
L_curve25519_avx2_bits:
movq 176(%rsp), %rax
movq %rbx, 160(%rsp)
movq %rbx, %rcx
andq $63, %rcx
shrq $6, %rbx
movq (%rsi,%rbx,8), %rbx
shrq %cl, %rbx
andq $0x01, %rbx
xorq %rbx, %rax
negq %rax
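# %rax is now an all-ones mask when the bit changed (else zero); the
# XOR/AND/XOR sequences below swap the point pairs in constant time.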
# Conditional Swap
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
xorq 64(%rsp), %r9
xorq 72(%rsp), %r10
xorq 80(%rsp), %r11
xorq 88(%rsp), %r12
andq %rax, %r9
andq %rax, %r10
andq %rax, %r11
andq %rax, %r12
xorq %r9, (%rdi)
xorq %r10, 8(%rdi)
xorq %r11, 16(%rdi)
xorq %r12, 24(%rdi)
xorq %r9, 64(%rsp)
xorq %r10, 72(%rsp)
xorq %r11, 80(%rsp)
xorq %r12, 88(%rsp)
# Conditional Swap
movq (%rsp), %r9
movq 8(%rsp), %r10
movq 16(%rsp), %r11
movq 24(%rsp), %r12
xorq 32(%rsp), %r9
xorq 40(%rsp), %r10
xorq 48(%rsp), %r11
xorq 56(%rsp), %r12
andq %rax, %r9
andq %rax, %r10
andq %rax, %r11
andq %rax, %r12
xorq %r9, (%rsp)
xorq %r10, 8(%rsp)
xorq %r11, 16(%rsp)
xorq %r12, 24(%rsp)
xorq %r9, 32(%rsp)
xorq %r10, 40(%rsp)
xorq %r11, 48(%rsp)
xorq %r12, 56(%rsp)
movq %rbx, 176(%rsp)
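# Each ladder step needs the sums and differences of both point pairs; the
# fused Add-Sub blocks below compute x+z and x-z with an immediate
# mod 2^255-19 correction of the carry/borrow.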
# Add-Sub
# Add
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq %r9, %r13
addq (%rsp), %r9
movq %r10, %r14
adcq 8(%rsp), %r10
movq %r11, %r15
adcq 16(%rsp), %r11
movq %r12, %rbp
adcq 24(%rsp), %r12
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r12, %rcx
movq $0x7fffffffffffffff, %rbx
imulq $19, %rcx
andq %rbx, %r12
# Sub modulus (if overflow)
addq %rcx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Sub
subq (%rsp), %r13
sbbq 8(%rsp), %r14
sbbq 16(%rsp), %r15
sbbq 24(%rsp), %rbp
sbbq %rcx, %rcx
shldq $0x01, %rbp, %rcx
imulq $-19, %rcx
andq %rbx, %rbp
# Add modulus (if underflow)
subq %rcx, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbp
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %r13, 128(%rsp)
movq %r14, 136(%rsp)
movq %r15, 144(%rsp)
movq %rbp, 152(%rsp)
# Add-Sub
# Add
movq 64(%rsp), %r9
movq 72(%rsp), %r10
movq 80(%rsp), %r11
movq 88(%rsp), %r12
movq %r9, %r13
addq 32(%rsp), %r9
movq %r10, %r14
adcq 40(%rsp), %r10
movq %r11, %r15
adcq 48(%rsp), %r11
movq %r12, %rbp
adcq 56(%rsp), %r12
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r12, %rcx
movq $0x7fffffffffffffff, %rbx
imulq $19, %rcx
andq %rbx, %r12
# Sub modulus (if overflow)
addq %rcx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Sub
subq 32(%rsp), %r13
sbbq 40(%rsp), %r14
sbbq 48(%rsp), %r15
sbbq 56(%rsp), %rbp
sbbq %rcx, %rcx
shldq $0x01, %rbp, %rcx
imulq $-19, %rcx
andq %rbx, %rbp
# Add modulus (if underflow)
subq %rcx, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbp
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq %r11, 48(%rsp)
movq %r12, 56(%rsp)
movq %r13, 96(%rsp)
movq %r14, 104(%rsp)
movq %r15, 112(%rsp)
movq %rbp, 120(%rsp)
# Multiply
# A[0] * B[0]
movq 128(%rsp), %rdx
mulxq 32(%rsp), %r9, %r10
# A[2] * B[0]
mulxq 48(%rsp), %r11, %r12
# A[1] * B[0]
mulxq 40(%rsp), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 136(%rsp), %rdx
mulxq 56(%rsp), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq 32(%rsp), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 48(%rsp), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 144(%rsp), %rdx
mulxq 40(%rsp), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq 32(%rsp), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 136(%rsp), %rdx
mulxq 40(%rsp), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 152(%rsp), %rdx
adoxq %rcx, %r12
mulxq 40(%rsp), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 144(%rsp), %rdx
mulxq 48(%rsp), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 152(%rsp), %rdx
adoxq %rcx, %r14
mulxq 56(%rsp), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq 32(%rsp), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 56(%rsp), %rdx
adcxq %rcx, %r13
mulxq 128(%rsp), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 56(%rsp), %rdx
mulxq 144(%rsp), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 152(%rsp), %rdx
adcxq %rcx, %r15
mulxq 48(%rsp), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
# Store
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq %r11, 48(%rsp)
movq %r12, 56(%rsp)
# Multiply
# A[0] * B[0]
movq (%rdi), %rdx
mulxq 96(%rsp), %r9, %r10
# A[2] * B[0]
mulxq 112(%rsp), %r11, %r12
# A[1] * B[0]
mulxq 104(%rsp), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rdi), %rdx
mulxq 120(%rsp), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq 96(%rsp), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 112(%rsp), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rdi), %rdx
mulxq 104(%rsp), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq 96(%rsp), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rdi), %rdx
mulxq 104(%rsp), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rdi), %rdx
adoxq %rcx, %r12
mulxq 104(%rsp), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rdi), %rdx
mulxq 112(%rsp), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rdi), %rdx
adoxq %rcx, %r14
mulxq 120(%rsp), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq 96(%rsp), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 120(%rsp), %rdx
adcxq %rcx, %r13
mulxq (%rdi), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 120(%rsp), %rdx
mulxq 16(%rdi), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rdi), %rdx
adcxq %rcx, %r15
mulxq 112(%rsp), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
# Store
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq %r11, 16(%rsp)
movq %r12, 24(%rsp)
# Square
movq 128(%rsp), %rdx
movq 136(%rsp), %rax
# A[0] * A[1]
movq %rdx, %rbp
mulxq %rax, %r10, %r11
# A[0] * A[3]
mulxq 152(%rsp), %r12, %r13
# A[2] * A[1]
movq 144(%rsp), %rdx
mulxq %rax, %rcx, %rbx
xorq %r9, %r9
adoxq %rcx, %r12
# A[2] * A[3]
mulxq 152(%rsp), %r14, %r15
adoxq %rbx, %r13
# A[2] * A[0]
mulxq %rbp, %rcx, %rbx
adoxq %r9, %r14
adcxq %rcx, %r11
adoxq %r9, %r15
# A[1] * A[3]
movq %rax, %rdx
mulxq 152(%rsp), %rcx, %rdx
adcxq %rbx, %r12
adcxq %rcx, %r13
adcxq %rdx, %r14
adcxq %r9, %r15
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r9, %rcx
xorq %rbp, %rbp
adcxq %r10, %r10
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r10
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r11
adcxq %r12, %r12
# A[2] * A[2]
movq 144(%rsp), %rdx
adoxq %rbx, %r12
mulxq %rdx, %rbx, %rcx
adcxq %r13, %r13
adoxq %rbx, %r13
adcxq %r14, %r14
# A[3] * A[3]
movq 152(%rsp), %rdx
adoxq %rcx, %r14
mulxq %rdx, %rcx, %rbx
adcxq %r15, %r15
adoxq %rcx, %r15
adcxq %rbp, %rbp
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rbx
addq %rbp, %r12
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r12, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r12
xorq %rcx, %rcx
adoxq %rbx, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
mulxq %r15, %rbx, %r15
adcxq %rbx, %r11
adoxq %r15, %r12
adcxq %rcx, %r12
# Store
movq %r9, 96(%rsp)
movq %r10, 104(%rsp)
movq %r11, 112(%rsp)
movq %r12, 120(%rsp)
# Square
movq (%rdi), %rdx
movq 8(%rdi), %rax
# A[0] * A[1]
movq %rdx, %rbp
mulxq %rax, %r10, %r11
# A[0] * A[3]
mulxq 24(%rdi), %r12, %r13
# A[2] * A[1]
movq 16(%rdi), %rdx
mulxq %rax, %rcx, %rbx
xorq %r9, %r9
adoxq %rcx, %r12
# A[2] * A[3]
mulxq 24(%rdi), %r14, %r15
adoxq %rbx, %r13
# A[2] * A[0]
mulxq %rbp, %rcx, %rbx
adoxq %r9, %r14
adcxq %rcx, %r11
adoxq %r9, %r15
# A[1] * A[3]
movq %rax, %rdx
mulxq 24(%rdi), %rcx, %rdx
adcxq %rbx, %r12
adcxq %rcx, %r13
adcxq %rdx, %r14
adcxq %r9, %r15
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r9, %rcx
xorq %rbp, %rbp
adcxq %r10, %r10
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r10
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r11
adcxq %r12, %r12
# A[2] * A[2]
movq 16(%rdi), %rdx
adoxq %rbx, %r12
mulxq %rdx, %rbx, %rcx
adcxq %r13, %r13
adoxq %rbx, %r13
adcxq %r14, %r14
# A[3] * A[3]
movq 24(%rdi), %rdx
adoxq %rcx, %r14
mulxq %rdx, %rcx, %rbx
adcxq %r15, %r15
adoxq %rcx, %r15
adcxq %rbp, %rbp
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rbx
addq %rbp, %r12
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r12, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r12
xorq %rcx, %rcx
adoxq %rbx, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
mulxq %r15, %rbx, %r15
adcxq %rbx, %r11
adoxq %r15, %r12
adcxq %rcx, %r12
# Store
movq %r9, 128(%rsp)
movq %r10, 136(%rsp)
movq %r11, 144(%rsp)
movq %r12, 152(%rsp)
# Add-Sub
# Add
movq (%rsp), %r9
movq 8(%rsp), %r10
movq 16(%rsp), %r11
movq 24(%rsp), %r12
movq %r9, %r13
addq 32(%rsp), %r9
movq %r10, %r14
adcq 40(%rsp), %r10
movq %r11, %r15
adcq 48(%rsp), %r11
movq %r12, %rbp
adcq 56(%rsp), %r12
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r12, %rcx
movq $0x7fffffffffffffff, %rbx
imulq $19, %rcx
andq %rbx, %r12
# Sub modulus (if overflow)
addq %rcx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Sub
subq 32(%rsp), %r13
sbbq 40(%rsp), %r14
sbbq 48(%rsp), %r15
sbbq 56(%rsp), %rbp
sbbq %rcx, %rcx
shldq $0x01, %rbp, %rcx
imulq $-19, %rcx
andq %rbx, %rbp
# Add modulus (if underflow)
subq %rcx, %r13
sbbq $0x00, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbp
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq %r11, 80(%rsp)
movq %r12, 88(%rsp)
movq %r13, 32(%rsp)
movq %r14, 40(%rsp)
movq %r15, 48(%rsp)
movq %rbp, 56(%rsp)
# Multiply
# A[0] * B[0]
movq 96(%rsp), %rdx
mulxq 128(%rsp), %r9, %r10
# A[2] * B[0]
mulxq 144(%rsp), %r11, %r12
# A[1] * B[0]
mulxq 136(%rsp), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 104(%rsp), %rdx
mulxq 152(%rsp), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq 128(%rsp), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 144(%rsp), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 112(%rsp), %rdx
mulxq 136(%rsp), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq 128(%rsp), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 104(%rsp), %rdx
mulxq 136(%rsp), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 120(%rsp), %rdx
adoxq %rcx, %r12
mulxq 136(%rsp), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 112(%rsp), %rdx
mulxq 144(%rsp), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 120(%rsp), %rdx
adoxq %rcx, %r14
mulxq 152(%rsp), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq 128(%rsp), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 152(%rsp), %rdx
adcxq %rcx, %r13
mulxq 96(%rsp), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 152(%rsp), %rdx
mulxq 112(%rsp), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 120(%rsp), %rdx
adcxq %rcx, %r15
mulxq 144(%rsp), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
# Sub
movq 128(%rsp), %r9
movq 136(%rsp), %r10
movq 144(%rsp), %r11
movq 152(%rsp), %r12
subq 96(%rsp), %r9
sbbq 104(%rsp), %r10
sbbq 112(%rsp), %r11
sbbq 120(%rsp), %r12
sbbq %rcx, %rcx
shldq $0x01, %r12, %rcx
movq $0x7fffffffffffffff, %rbx
imulq $-19, %rcx
andq %rbx, %r12
# Add modulus (if underflow)
subq %rcx, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
movq %r9, 128(%rsp)
movq %r10, 136(%rsp)
movq %r11, 144(%rsp)
movq %r12, 152(%rsp)
# Square
movq 32(%rsp), %rdx
movq 40(%rsp), %rax
# A[0] * A[1]
movq %rdx, %rbp
mulxq %rax, %r10, %r11
# A[0] * A[3]
mulxq 56(%rsp), %r12, %r13
# A[2] * A[1]
movq 48(%rsp), %rdx
mulxq %rax, %rcx, %rbx
xorq %r9, %r9
adoxq %rcx, %r12
# A[2] * A[3]
mulxq 56(%rsp), %r14, %r15
adoxq %rbx, %r13
# A[2] * A[0]
mulxq %rbp, %rcx, %rbx
adoxq %r9, %r14
adcxq %rcx, %r11
adoxq %r9, %r15
# A[1] * A[3]
movq %rax, %rdx
mulxq 56(%rsp), %rcx, %rdx
adcxq %rbx, %r12
adcxq %rcx, %r13
adcxq %rdx, %r14
adcxq %r9, %r15
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r9, %rcx
xorq %rbp, %rbp
adcxq %r10, %r10
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r10
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r11
adcxq %r12, %r12
# A[2] * A[2]
movq 48(%rsp), %rdx
adoxq %rbx, %r12
mulxq %rdx, %rbx, %rcx
adcxq %r13, %r13
adoxq %rbx, %r13
adcxq %r14, %r14
# A[3] * A[3]
movq 56(%rsp), %rdx
adoxq %rcx, %r14
mulxq %rdx, %rcx, %rbx
adcxq %r15, %r15
adoxq %rcx, %r15
adcxq %rbp, %rbp
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rbx
addq %rbp, %r12
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r12, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r12
xorq %rcx, %rcx
adoxq %rbx, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
mulxq %r15, %rbx, %r15
adcxq %rbx, %r11
adoxq %r15, %r12
adcxq %rcx, %r12
# Store
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq %r11, 48(%rsp)
movq %r12, 56(%rsp)
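# Inline multiply by 121666 (0x1db42), as in fe_mul121666_avx2.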
movq $0x1db42, %rdx
mulxq 128(%rsp), %r9, %rbp
mulxq 136(%rsp), %r10, %r15
mulxq 144(%rsp), %r11, %r14
mulxq 152(%rsp), %r12, %r13
addq %rbp, %r10
adcq %r15, %r11
adcq %r14, %r12
adcq $0x00, %r13
movq $0x7fffffffffffffff, %rbp
shldq $0x01, %r12, %r13
andq %rbp, %r12
imulq $19, %r13, %r13
addq %r13, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq %r11, 16(%rsp)
movq %r12, 24(%rsp)
# Square
movq 64(%rsp), %rdx
movq 72(%rsp), %rax
# A[0] * A[1]
movq %rdx, %rbp
mulxq %rax, %r10, %r11
# A[0] * A[3]
mulxq 88(%rsp), %r12, %r13
# A[2] * A[1]
movq 80(%rsp), %rdx
mulxq %rax, %rcx, %rbx
xorq %r9, %r9
adoxq %rcx, %r12
# A[2] * A[3]
mulxq 88(%rsp), %r14, %r15
adoxq %rbx, %r13
# A[2] * A[0]
mulxq %rbp, %rcx, %rbx
adoxq %r9, %r14
adcxq %rcx, %r11
adoxq %r9, %r15
# A[1] * A[3]
movq %rax, %rdx
mulxq 88(%rsp), %rcx, %rdx
adcxq %rbx, %r12
adcxq %rcx, %r13
adcxq %rdx, %r14
adcxq %r9, %r15
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r9, %rcx
xorq %rbp, %rbp
adcxq %r10, %r10
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r10
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r11
adcxq %r12, %r12
# A[2] * A[2]
movq 80(%rsp), %rdx
adoxq %rbx, %r12
mulxq %rdx, %rbx, %rcx
adcxq %r13, %r13
adoxq %rbx, %r13
adcxq %r14, %r14
# A[3] * A[3]
movq 88(%rsp), %rdx
adoxq %rcx, %r14
mulxq %rdx, %rcx, %rbx
adcxq %r15, %r15
adoxq %rcx, %r15
adcxq %rbp, %rbp
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rbx
addq %rbp, %r12
adcq $0x00, %rbx
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r12, %rbx
imulq $19, %rbx, %rbx
andq %rcx, %r12
xorq %rcx, %rcx
adoxq %rbx, %r9
mulxq %r13, %rbx, %r13
adcxq %rbx, %r9
adoxq %r13, %r10
mulxq %r14, %rbx, %r14
adcxq %rbx, %r10
adoxq %r14, %r11
mulxq %r15, %rbx, %r15
adcxq %rbx, %r11
adoxq %r15, %r12
adcxq %rcx, %r12
# Store
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq %r11, 80(%rsp)
movq %r12, 88(%rsp)
# Add
movq 96(%rsp), %r9
movq 104(%rsp), %r10
addq (%rsp), %r9
movq 112(%rsp), %r11
adcq 8(%rsp), %r10
movq 120(%rsp), %r12
adcq 16(%rsp), %r11
adcq 24(%rsp), %r12
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r12, %rcx
movq $0x7fffffffffffffff, %rbx
imulq $19, %rcx
andq %rbx, %r12
# Sub modulus (if overflow)
addq %rcx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
movq %r9, 96(%rsp)
movq %r10, 104(%rsp)
movq %r11, 112(%rsp)
movq %r12, 120(%rsp)
# Multiply
# A[0] * B[0]
movq 32(%rsp), %rdx
mulxq (%r8), %r9, %r10
# A[2] * B[0]
mulxq 16(%r8), %r11, %r12
# A[1] * B[0]
mulxq 8(%r8), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 40(%rsp), %rdx
mulxq 24(%r8), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq (%r8), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%r8), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 48(%rsp), %rdx
mulxq 8(%r8), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq (%r8), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 40(%rsp), %rdx
mulxq 8(%r8), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 56(%rsp), %rdx
adoxq %rcx, %r12
mulxq 8(%r8), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 48(%rsp), %rdx
mulxq 16(%r8), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 56(%rsp), %rdx
adoxq %rcx, %r14
mulxq 24(%r8), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%r8), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%r8), %rdx
adcxq %rcx, %r13
mulxq 32(%rsp), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%r8), %rdx
mulxq 48(%rsp), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 56(%rsp), %rdx
adcxq %rcx, %r15
mulxq 16(%r8), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
# Store
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq %r11, 48(%rsp)
movq %r12, 56(%rsp)
# Multiply
# A[0] * B[0]
movq 96(%rsp), %rdx
mulxq 128(%rsp), %r9, %r10
# A[2] * B[0]
mulxq 144(%rsp), %r11, %r12
# A[1] * B[0]
mulxq 136(%rsp), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 104(%rsp), %rdx
mulxq 152(%rsp), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq 128(%rsp), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 144(%rsp), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 112(%rsp), %rdx
mulxq 136(%rsp), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq 128(%rsp), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 104(%rsp), %rdx
mulxq 136(%rsp), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 120(%rsp), %rdx
adoxq %rcx, %r12
mulxq 136(%rsp), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 112(%rsp), %rdx
mulxq 144(%rsp), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 120(%rsp), %rdx
adoxq %rcx, %r14
mulxq 152(%rsp), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq 128(%rsp), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 152(%rsp), %rdx
adcxq %rcx, %r13
mulxq 96(%rsp), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 152(%rsp), %rdx
mulxq 112(%rsp), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 120(%rsp), %rdx
adcxq %rcx, %r15
mulxq 144(%rsp), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
# Store
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq %r11, 16(%rsp)
movq %r12, 24(%rsp)
movq 160(%rsp), %rbx
decq %rbx
jge L_curve25519_avx2_bits
# Invert
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
movq %rsp, %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 128(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 128(%rsp), %rdi
leaq 128(%rsp), %rsi
movq $0x63, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 96(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 96(%rsp), %rdi
leaq 96(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 64(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq 168(%rsp), %rdi
# Multiply
# A[0] * B[0]
movq (%rsp), %rdx
mulxq (%rdi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rdi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rdi), %rcx, %rbx
xorq %rbp, %rbp
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rsp), %rdx
mulxq 24(%rdi), %r13, %r14
adcxq %rbx, %r11
# A[0] * B[1]
mulxq (%rdi), %rcx, %rbx
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rdi), %rcx, %r15
adoxq %rbx, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rsp), %rdx
mulxq 8(%rdi), %rcx, %rbx
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbp, %r14
adoxq %rbx, %r13
# A[0] * B[2]
mulxq (%rdi), %rcx, %rbx
adoxq %rbp, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rsp), %rdx
mulxq 8(%rdi), %rdx, %rcx
adcxq %rbx, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rsp), %rdx
adoxq %rcx, %r12
mulxq 8(%rdi), %rcx, %rbx
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rsp), %rdx
mulxq 16(%rdi), %rdx, %rcx
adcxq %rbx, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rsp), %rdx
adoxq %rcx, %r14
mulxq 24(%rdi), %rcx, %rbx
adoxq %rbp, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rdi), %rdx, %rcx
adcxq %rbx, %rbp
xorq %rbx, %rbx
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rdi), %rdx
adcxq %rcx, %r13
mulxq (%rsp), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rdi), %rdx
mulxq 16(%rsp), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rsp), %rdx
adcxq %rcx, %r15
mulxq 16(%rdi), %rcx, %rdx
adcxq %rbx, %rbp
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %rbx, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %rcx
addq %rbp, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %rbx
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %rbx, %r12
xorq %rbx, %rbx
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %rbx, %r12
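# Final reduction: fold the top bit back in (2^255 == 19 mod p), then use
# a +19 carry probe to detect a value >= p and correct it, leaving the
# result reduced mod p = 2^255-19.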
movq $0x7fffffffffffffff, %rbx
movq %r12, %rdx
sarq $63, %rdx
andq $19, %rdx
andq %rbx, %r12
addq %rdx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
movq $0x7fffffffffffffff, %rcx
movq %r9, %rdx
addq $19, %rdx
movq %r10, %rdx
adcq $0x00, %rdx
movq %r11, %rdx
adcq $0x00, %rdx
movq %r12, %rdx
adcq $0x00, %rdx
sarq $63, %rdx
andq $19, %rdx
andq %rcx, %r12
addq %rdx, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
xorq %rax, %rax
addq $0xb8, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size curve25519_avx2,.-curve25519_avx2
#endif /* __APPLE__ */
#ifdef HAVE_ED25519
#ifndef __APPLE__
.text
.globl fe_sq2_avx2
.type fe_sq2_avx2,@function
.align 16
fe_sq2_avx2:
#else
.section __TEXT,__text
.globl _fe_sq2_avx2
.p2align 4
_fe_sq2_avx2:
#endif /* __APPLE__ */
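# fe_sq2_avx2: compute 2*a^2 mod 2^255-19 (%rdi = out, %rsi = a). Same
# schedule as fe_sq_avx2, with the doubling done by the shld chain before
# the final fold of the top bits (x19).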
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# Square * 2
movq (%rsi), %rdx
movq 8(%rsi), %rax
# A[0] * A[1]
movq %rdx, %r15
mulxq %rax, %r9, %r10
# A[0] * A[3]
mulxq 24(%rsi), %r11, %r12
# A[2] * A[1]
movq 16(%rsi), %rdx
mulxq %rax, %rcx, %rbx
xorq %r8, %r8
adoxq %rcx, %r11
# A[2] * A[3]
mulxq 24(%rsi), %r13, %r14
adoxq %rbx, %r12
# A[2] * A[0]
mulxq %r15, %rcx, %rbx
adoxq %r8, %r13
adcxq %rcx, %r10
adoxq %r8, %r14
# A[1] * A[3]
movq %rax, %rdx
mulxq 24(%rsi), %rcx, %rdx
adcxq %rbx, %r11
adcxq %rcx, %r12
adcxq %rdx, %r13
adcxq %r8, %r14
# A[0] * A[0]
movq %r15, %rdx
mulxq %rdx, %r8, %rcx
xorq %r15, %r15
adcxq %r9, %r9
# A[1] * A[1]
movq %rax, %rdx
adoxq %rcx, %r9
mulxq %rdx, %rcx, %rbx
adcxq %r10, %r10
adoxq %rcx, %r10
adcxq %r11, %r11
# A[2] * A[2]
movq 16(%rsi), %rdx
adoxq %rbx, %r11
mulxq %rdx, %rbx, %rcx
adcxq %r12, %r12
adoxq %rbx, %r12
adcxq %r13, %r13
# A[3] * A[3]
movq 24(%rsi), %rdx
adoxq %rcx, %r13
mulxq %rdx, %rcx, %rbx
adcxq %r14, %r14
adoxq %rcx, %r14
adcxq %r15, %r15
adoxq %rbx, %r15
movq $38, %rdx
mulxq %r15, %r15, %rax
addq %r15, %r11
adcq $0x00, %rax
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r11, %rax
imulq $19, %rax, %rax
andq %rcx, %r11
xorq %rcx, %rcx
adoxq %rax, %r8
mulxq %r12, %rax, %r12
adcxq %rax, %r8
adoxq %r12, %r9
mulxq %r13, %rax, %r13
adcxq %rax, %r9
adoxq %r13, %r10
mulxq %r14, %rax, %r14
adcxq %rax, %r10
adoxq %r14, %r11
adcxq %rcx, %r11
movq %r11, %rax
shldq $0x01, %r10, %r11
shldq $0x01, %r9, %r10
shldq $0x01, %r8, %r9
shlq $0x01, %r8
movq $0x7fffffffffffffff, %rcx
shrq $62, %rax
andq %rcx, %r11
imulq $19, %rax, %rax
addq %rax, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
# Store
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size fe_sq2_avx2,.-fe_sq2_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl fe_pow22523_avx2
.type fe_pow22523_avx2,@function
.align 16
fe_pow22523_avx2:
#else
.section __TEXT,__text
.globl _fe_pow22523_avx2
.p2align 4
_fe_pow22523_avx2:
#endif /* __APPLE__ */
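# fe_pow22523_avx2: raise a field element to the power 2^252-3 (= (p-5)/8
# for p = 2^255-19), the exponent used for the combined square-root step in
# Ed25519 point decompression. %rdi/%rsi are saved at 96/104(%rsp).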
subq $0x70, %rsp
# pow22523
movq %rdi, 96(%rsp)
movq %rsi, 104(%rsp)
movq %rsp, %rdi
movq 104(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq 104(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $4, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $19, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $9, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 64(%rsp), %rdi
leaq 64(%rsp), %rsi
movq $0x63, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
leaq 32(%rsp), %rdi
leaq 32(%rsp), %rsi
movq $49, %rdx
#ifndef __APPLE__
callq fe_sq_n_avx2@plt
#else
callq _fe_sq_n_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
leaq 32(%rsp), %rsi
movq %rsp, %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
movq %rsp, %rdi
movq %rsp, %rsi
#ifndef __APPLE__
callq fe_sq_avx2@plt
#else
callq _fe_sq_avx2
#endif /* __APPLE__ */
movq 96(%rsp), %rdi
movq %rsp, %rsi
movq 104(%rsp), %rdx
#ifndef __APPLE__
callq fe_mul_avx2@plt
#else
callq _fe_mul_avx2
#endif /* __APPLE__ */
movq 104(%rsp), %rsi
movq 96(%rsp), %rdi
addq $0x70, %rsp
repz retq
#ifndef __APPLE__
.size fe_pow22523_avx2,.-fe_pow22523_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p2_avx2
.type ge_p1p1_to_p2_avx2,@function
.align 16
ge_p1p1_to_p2_avx2:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p2_avx2
.p2align 4
_ge_p1p1_to_p2_avx2:
#endif /* __APPLE__ */
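# ge_p1p1_to_p2_avx2: convert a completed (P1P1) point to a projective (P2)
# point: X3 = X*T, Y3 = Y*Z, Z3 = Z*T (coordinates assumed laid out as
# 32-byte X, Y, Z, T fields, per the offsets used below).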
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
leaq 96(%rsi), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
leaq 64(%rsi), %rsi
leaq 64(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
leaq -32(%rsi), %rax
leaq -32(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $16, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p1p1_to_p2_avx2,.-ge_p1p1_to_p2_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p1p1_to_p3_avx2
.type ge_p1p1_to_p3_avx2,@function
.align 16
ge_p1p1_to_p3_avx2:
#else
.section __TEXT,__text
.globl _ge_p1p1_to_p3_avx2
.p2align 4
_ge_p1p1_to_p3_avx2:
#endif /* __APPLE__ */
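# ge_p1p1_to_p3_avx2: convert a completed (P1P1) point to an extended (P3)
# point: X3 = X*T, Y3 = Y*Z, Z3 = Z*T, T3 = X*Y (same assumed X, Y, Z, T
# layout as ge_p1p1_to_p2_avx2).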
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
leaq 96(%rsi), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
leaq 32(%rsi), %rax
leaq 96(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
leaq 64(%rsi), %rsi
leaq -64(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
leaq 32(%rsi), %rax
leaq 32(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r9, %r10
# A[2] * B[0]
mulxq 16(%rsi), %r11, %r12
# A[1] * B[0]
mulxq 8(%rsi), %rcx, %r8
xorq %rbx, %rbx
adcxq %rcx, %r10
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r13, %r14
adcxq %r8, %r11
# A[0] * B[1]
mulxq (%rsi), %rcx, %r8
adoxq %rcx, %r10
# A[2] * B[1]
mulxq 16(%rsi), %rcx, %r15
adoxq %r8, %r11
adcxq %rcx, %r12
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %rcx, %r8
adcxq %r15, %r13
adoxq %rcx, %r12
adcxq %rbx, %r14
adoxq %r8, %r13
# A[0] * B[2]
mulxq (%rsi), %rcx, %r8
adoxq %rbx, %r14
xorq %r15, %r15
adcxq %rcx, %r11
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %rcx
adcxq %r8, %r12
adoxq %rdx, %r11
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r12
mulxq 8(%rsi), %rcx, %r8
adcxq %rcx, %r13
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %rcx
adcxq %r8, %r14
adoxq %rdx, %r13
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %rcx, %r14
mulxq 24(%rsi), %rcx, %r8
adoxq %rbx, %r15
adcxq %rcx, %r15
# A[0] * B[3]
mulxq (%rsi), %rdx, %rcx
adcxq %r8, %rbx
xorq %r8, %r8
adcxq %rdx, %r12
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rcx, %r13
mulxq (%rax), %rdx, %rcx
adoxq %rdx, %r12
adoxq %rcx, %r13
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %rcx
adcxq %rdx, %r14
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %rcx, %r15
mulxq 16(%rsi), %rcx, %rdx
adcxq %r8, %rbx
adoxq %rcx, %r14
adoxq %rdx, %r15
adoxq %r8, %rbx
movq $38, %rdx
mulxq %rbx, %rbx, %rcx
addq %rbx, %r12
adcq $0x00, %rcx
movq $0x7fffffffffffffff, %r8
shldq $0x01, %r12, %rcx
imulq $19, %rcx, %rcx
andq %r8, %r12
xorq %r8, %r8
adoxq %rcx, %r9
mulxq %r13, %rcx, %r13
adcxq %rcx, %r9
adoxq %r13, %r10
mulxq %r14, %rcx, %r14
adcxq %rcx, %r10
adoxq %r14, %r11
mulxq %r15, %rcx, %r15
adcxq %rcx, %r11
adoxq %r15, %r12
adcxq %r8, %r12
# Store
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
addq $16, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p1p1_to_p3_avx2,.-ge_p1p1_to_p3_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_p2_dbl_avx2
.type ge_p2_dbl_avx2,@function
.align 16
ge_p2_dbl_avx2:
#else
.section __TEXT,__text
.globl _ge_p2_dbl_avx2
.p2align 4
_ge_p2_dbl_avx2:
#endif /* __APPLE__ */
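# ge_p2_dbl_avx2 doubles a point in projective (P2) coordinates and
# writes the result in completed (P1P1) form, ref10-style: it squares
# X, Y and (X+Y), computes 2*Z^2, and combines the results with the
# modular add/sub blocks below. (High-level reading of the
# loads/stores; coordinate names are inferred from the offsets.)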
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
subq $16, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
leaq 64(%rdi), %rdi
# Square
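# 4x4 squaring: the off-diagonal products A[i]*A[j], i < j, are
# computed once with MULX, then doubled by adding each limb to
# itself on the ADCX chain while the diagonal squares A[i]^2 are
# accumulated on the ADOX chain.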
movq (%rsi), %rdx
movq 8(%rsi), %r9
# A[0] * A[1]
movq %rdx, %rbp
mulxq %r9, %r11, %r12
# A[0] * A[3]
mulxq 24(%rsi), %r13, %r14
# A[2] * A[1]
movq 16(%rsi), %rdx
mulxq %r9, %rcx, %r8
xorq %r10, %r10
adoxq %rcx, %r13
# A[2] * A[3]
mulxq 24(%rsi), %r15, %rbx
adoxq %r8, %r14
# A[2] * A[0]
mulxq %rbp, %rcx, %r8
adoxq %r10, %r15
adcxq %rcx, %r12
adoxq %r10, %rbx
# A[1] * A[3]
movq %r9, %rdx
mulxq 24(%rsi), %rcx, %rdx
adcxq %r8, %r13
adcxq %rcx, %r14
adcxq %rdx, %r15
adcxq %r10, %rbx
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r10, %rcx
xorq %rbp, %rbp
adcxq %r11, %r11
# A[1] * A[1]
movq %r9, %rdx
adoxq %rcx, %r11
mulxq %rdx, %rcx, %r8
adcxq %r12, %r12
adoxq %rcx, %r12
adcxq %r13, %r13
# A[2] * A[2]
movq 16(%rsi), %rdx
adoxq %r8, %r13
mulxq %rdx, %r8, %rcx
adcxq %r14, %r14
adoxq %r8, %r14
adcxq %r15, %r15
# A[3] * A[3]
movq 24(%rsi), %rdx
adoxq %rcx, %r15
mulxq %rdx, %rcx, %r8
adcxq %rbx, %rbx
adoxq %rcx, %rbx
adcxq %rbp, %rbp
adoxq %r8, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %rcx, %r13
xorq %rcx, %rcx
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq 32(%rsi), %rsi
# Square
movq (%rsi), %rdx
movq 8(%rsi), %r9
# A[0] * A[1]
movq %rdx, %rbp
mulxq %r9, %r11, %r12
# A[0] * A[3]
mulxq 24(%rsi), %r13, %r14
# A[2] * A[1]
movq 16(%rsi), %rdx
mulxq %r9, %rcx, %r8
xorq %r10, %r10
adoxq %rcx, %r13
# A[2] * A[3]
mulxq 24(%rsi), %r15, %rbx
adoxq %r8, %r14
# A[2] * A[0]
mulxq %rbp, %rcx, %r8
adoxq %r10, %r15
adcxq %rcx, %r12
adoxq %r10, %rbx
# A[1] * A[3]
movq %r9, %rdx
mulxq 24(%rsi), %rcx, %rdx
adcxq %r8, %r13
adcxq %rcx, %r14
adcxq %rdx, %r15
adcxq %r10, %rbx
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r10, %rcx
xorq %rbp, %rbp
adcxq %r11, %r11
# A[1] * A[1]
movq %r9, %rdx
adoxq %rcx, %r11
mulxq %rdx, %rcx, %r8
adcxq %r12, %r12
adoxq %rcx, %r12
adcxq %r13, %r13
# A[2] * A[2]
movq 16(%rsi), %rdx
adoxq %r8, %r13
mulxq %rdx, %r8, %rcx
adcxq %r14, %r14
adoxq %r8, %r14
adcxq %r15, %r15
# A[3] * A[3]
movq 24(%rsi), %rdx
adoxq %rcx, %r15
mulxq %rdx, %rcx, %r8
adcxq %rbx, %rbx
adoxq %rcx, %rbx
adcxq %rbp, %rbp
adoxq %r8, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %rcx, %r13
xorq %rcx, %rcx
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
# Store
movq %rdi, %rsi
leaq -32(%rdi), %rdi
# Add-Sub
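# Fused add/sub: one pass computes a+b into %r10..%r13 and a-b into
# %r14,%r15,%rbx,%rbp. The carry out of bit 255 of the sum (or the
# borrow of the difference) is folded back with a multiply by +/-19,
# since 2^255 = 19 (mod p), keeping both results below 2^255.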
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r13, %rcx
movq $0x7fffffffffffffff, %r8
imulq $19, %rcx
andq %r8, %r13
# Sub modulus (if overflow)
addq %rcx, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %rcx, %rcx
shldq $0x01, %rbp, %rcx
imulq $-19, %rcx
andq %r8, %rbp
# Add modulus (if underflow)
subq %rcx, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 8(%rsp), %rax
leaq 32(%rax), %rsi
leaq -32(%rdi), %rdi
# Add
movq (%rsi), %r10
movq 8(%rsi), %r11
addq (%rax), %r10
movq 16(%rsi), %r12
adcq 8(%rax), %r11
movq 24(%rsi), %r13
adcq 16(%rax), %r12
adcq 24(%rax), %r13
movq $0x00, %rcx
adcq $0x00, %rcx
shldq $0x01, %r13, %rcx
movq $0x7fffffffffffffff, %r8
imulq $19, %rcx
andq %r8, %r13
# Sub modulus (if overflow)
addq %rcx, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
# Square
movq (%rdi), %rdx
movq 8(%rdi), %r9
# A[0] * A[1]
movq %rdx, %rbp
mulxq %r9, %r11, %r12
# A[0] * A[3]
mulxq 24(%rdi), %r13, %r14
# A[2] * A[1]
movq 16(%rdi), %rdx
mulxq %r9, %rcx, %r8
xorq %r10, %r10
adoxq %rcx, %r13
# A[2] * A[3]
mulxq 24(%rdi), %r15, %rbx
adoxq %r8, %r14
# A[2] * A[0]
mulxq %rbp, %rcx, %r8
adoxq %r10, %r15
adcxq %rcx, %r12
adoxq %r10, %rbx
# A[1] * A[3]
movq %r9, %rdx
mulxq 24(%rdi), %rcx, %rdx
adcxq %r8, %r13
adcxq %rcx, %r14
adcxq %rdx, %r15
adcxq %r10, %rbx
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r10, %rcx
xorq %rbp, %rbp
adcxq %r11, %r11
# A[1] * A[1]
movq %r9, %rdx
adoxq %rcx, %r11
mulxq %rdx, %rcx, %r8
adcxq %r12, %r12
adoxq %rcx, %r12
adcxq %r13, %r13
# A[2] * A[2]
movq 16(%rdi), %rdx
adoxq %r8, %r13
mulxq %rdx, %r8, %rcx
adcxq %r14, %r14
adoxq %r8, %r14
adcxq %r15, %r15
# A[3] * A[3]
movq 24(%rdi), %rdx
adoxq %rcx, %r15
mulxq %rdx, %rcx, %r8
adcxq %rbx, %rbx
adoxq %rcx, %rbx
adcxq %rbp, %rbp
adoxq %r8, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %rcx, %r13
xorq %rcx, %rcx
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
# Store
leaq 32(%rdi), %rsi
# Sub
subq (%rsi), %r10
sbbq 8(%rsi), %r11
sbbq 16(%rsi), %r12
sbbq 24(%rsi), %r13
sbbq %rcx, %rcx
shldq $0x01, %r13, %rcx
movq $0x7fffffffffffffff, %r8
imulq $-19, %rcx
andq %r8, %r13
# Add modulus (if underflow)
subq %rcx, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq 64(%rax), %rax
# Square * 2
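# Square the input and double the result: after the usual 4x4 square
# and mod-p fold, the shldq chain below shifts the four result limbs
# left by one bit, and the bits pushed out above bit 255 are
# multiplied by 19 and added back in.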
movq (%rax), %rdx
movq 8(%rax), %r9
# A[0] * A[1]
movq %rdx, %rbp
mulxq %r9, %r11, %r12
# A[0] * A[3]
mulxq 24(%rax), %r13, %r14
# A[2] * A[1]
movq 16(%rax), %rdx
mulxq %r9, %rcx, %r8
xorq %r10, %r10
adoxq %rcx, %r13
# A[2] * A[3]
mulxq 24(%rax), %r15, %rbx
adoxq %r8, %r14
# A[2] * A[0]
mulxq %rbp, %rcx, %r8
adoxq %r10, %r15
adcxq %rcx, %r12
adoxq %r10, %rbx
# A[1] * A[3]
movq %r9, %rdx
mulxq 24(%rax), %rcx, %rdx
adcxq %r8, %r13
adcxq %rcx, %r14
adcxq %rdx, %r15
adcxq %r10, %rbx
# A[0] * A[0]
movq %rbp, %rdx
mulxq %rdx, %r10, %rcx
xorq %rbp, %rbp
adcxq %r11, %r11
# A[1] * A[1]
movq %r9, %rdx
adoxq %rcx, %r11
mulxq %rdx, %rcx, %r8
adcxq %r12, %r12
adoxq %rcx, %r12
adcxq %r13, %r13
# A[2] * A[2]
movq 16(%rax), %rdx
adoxq %r8, %r13
mulxq %rdx, %r8, %rcx
adcxq %r14, %r14
adoxq %r8, %r14
adcxq %r15, %r15
# A[3] * A[3]
movq 24(%rax), %rdx
adoxq %rcx, %r15
mulxq %rdx, %rcx, %r8
adcxq %rbx, %rbx
adoxq %rcx, %rbx
adcxq %rbp, %rbp
adoxq %r8, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r9
addq %rbp, %r13
adcq $0x00, %r9
movq $0x7fffffffffffffff, %rcx
shldq $0x01, %r13, %r9
imulq $19, %r9, %r9
andq %rcx, %r13
xorq %rcx, %rcx
adoxq %r9, %r10
mulxq %r14, %r9, %r14
adcxq %r9, %r10
adoxq %r14, %r11
mulxq %r15, %r9, %r15
adcxq %r9, %r11
adoxq %r15, %r12
mulxq %rbx, %r9, %rbx
adcxq %r9, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
movq %r13, %r9
shldq $0x01, %r12, %r13
shldq $0x01, %r11, %r12
shldq $0x01, %r10, %r11
shlq $0x01, %r10
movq $0x7fffffffffffffff, %rcx
shrq $62, %r9
andq %rcx, %r13
imulq $19, %r9, %r9
addq %r9, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Store
leaq 64(%rdi), %rsi
leaq 96(%rdi), %rdi
# Sub
subq (%rsi), %r10
sbbq 8(%rsi), %r11
sbbq 16(%rsi), %r12
sbbq 24(%rsi), %r13
sbbq %rcx, %rcx
shldq $0x01, %r13, %rcx
movq $0x7fffffffffffffff, %r8
imulq $-19, %rcx
andq %r8, %r13
# Add modulus (if underflow)
subq %rcx, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
addq $16, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_p2_dbl_avx2,.-ge_p2_dbl_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_madd_avx2
.type ge_madd_avx2,@function
.align 16
ge_madd_avx2:
#else
.section __TEXT,__text
.globl _ge_madd_avx2
.p2align 4
_ge_madd_avx2:
#endif /* __APPLE__ */
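# ge_madd_avx2: ref10-style mixed addition of an extended (P3) point
# and a precomputed point (stored as y+x, y-x, xy*2*d), producing a
# completed (P1P1) result. The first multiply below is T1 * xy2d;
# the layout is inferred from the 96- and 64-byte offsets.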
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rax
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rax, 16(%rsp)
leaq 96(%rsi), %rcx
leaq 64(%rax), %rax
leaq 96(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %rsi, %rcx
leaq 32(%rsi), %rax
leaq -64(%rdi), %rsi
leaq -96(%rdi), %rdi
# Add-Sub
# Add
movq (%rax), %r10
movq 8(%rax), %r11
movq 16(%rax), %r12
movq 24(%rax), %r13
movq %r10, %r14
addq (%rcx), %r10
movq %r11, %r15
adcq 8(%rcx), %r11
movq %r12, %rbx
adcq 16(%rcx), %r12
movq %r13, %rbp
adcq 24(%rcx), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rcx), %r14
sbbq 8(%rcx), %r15
sbbq 16(%rcx), %rbx
sbbq 24(%rcx), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rdi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rdi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rdi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rdi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rdi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rdi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rdi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rdi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rdi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rdi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rdi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rdi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rdi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rdi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rdi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rdi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq 32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rsi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rsi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rsi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rsi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rsi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rsi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rsi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rsi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rsi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
# Add-Sub
# Add
movq (%rdi), %r10
movq 8(%rdi), %r11
movq 16(%rdi), %r12
movq 24(%rdi), %r13
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
leaq 64(%rcx), %rcx
# Double
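# Double a field element: add each limb to itself and fold the carry
# out of bit 255 back in with a multiply by 19 (2^255 = 19 mod p).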
movq (%rcx), %r10
movq 8(%rcx), %r11
addq %r10, %r10
movq 16(%rcx), %r12
adcq %r11, %r11
movq 24(%rcx), %r13
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
leaq 96(%rdi), %rsi
leaq 64(%rdi), %rdi
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_madd_avx2,.-ge_madd_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_msub_avx2
.type ge_msub_avx2,@function
.align 16
ge_msub_avx2:
#else
.section __TEXT,__text
.globl _ge_msub_avx2
.p2align 4
_ge_msub_avx2:
#endif /* __APPLE__ */
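# ge_msub_avx2: same structure as ge_madd_avx2, but subtracting the
# precomputed point. Reading the offsets, the y+x and y-x products
# are consumed in the opposite order and the final sum/difference
# with the doubled Z coordinate is swapped, matching the ref10
# ge_msub formulas.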
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rax
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rax, 16(%rsp)
leaq 96(%rsi), %rcx
leaq 64(%rax), %rax
leaq 96(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %rsi, %rcx
leaq 32(%rsi), %rax
leaq -64(%rdi), %rsi
leaq -96(%rdi), %rdi
# Add-Sub
# Add
movq (%rax), %r10
movq 8(%rax), %r11
movq 16(%rax), %r12
movq 24(%rax), %r13
movq %r10, %r14
addq (%rcx), %r10
movq %r11, %r15
adcq 8(%rcx), %r11
movq %r12, %rbx
adcq 16(%rcx), %r12
movq %r13, %rbp
adcq 24(%rcx), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rcx), %r14
sbbq 8(%rcx), %r15
sbbq 16(%rcx), %rbx
sbbq 24(%rcx), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rax
leaq 32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rdi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rdi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rdi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rdi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rdi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rdi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rdi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rdi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rdi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rdi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rdi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rdi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rdi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rdi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rdi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rdi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq -32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rsi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rsi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rsi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rsi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rsi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rsi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rsi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rsi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rsi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
# Add-Sub
# Add
movq (%rdi), %r10
movq 8(%rdi), %r11
movq 16(%rdi), %r12
movq 24(%rdi), %r13
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
leaq 64(%rcx), %rcx
# Double
movq (%rcx), %r10
movq 8(%rcx), %r11
addq %r10, %r10
movq 16(%rcx), %r12
adcq %r11, %r11
movq 24(%rcx), %r13
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
leaq 96(%rdi), %rsi
leaq 64(%rdi), %rdi
# Add-Sub
# Add
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_msub_avx2,.-ge_msub_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_add_avx2
.type ge_add_avx2,@function
.align 16
ge_add_avx2:
#else
.section __TEXT,__text
.globl _ge_add_avx2
.p2align 4
_ge_add_avx2:
#endif /* __APPLE__ */
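# ge_add_avx2: ref10-style addition of an extended (P3) point and a
# cached point (stored as Y+X, Y-X, Z, 2*d*T), producing a completed
# (P1P1) result. Unlike ge_madd, the cached point carries a Z
# coordinate, so an extra field multiply by q->Z appears before the
# doubling step below.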
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rax
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rax, 16(%rsp)
leaq 96(%rsi), %rcx
leaq 96(%rax), %rax
leaq 96(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %rsi, %rcx
leaq 32(%rsi), %rax
leaq -64(%rdi), %rsi
leaq -96(%rdi), %rdi
# Add-Sub
# Add
movq (%rax), %r10
movq 8(%rax), %r11
movq 16(%rax), %r12
movq 24(%rax), %r13
movq %r10, %r14
addq (%rcx), %r10
movq %r11, %r15
adcq 8(%rcx), %r11
movq %r12, %rbx
adcq 16(%rcx), %r12
movq %r13, %rbp
adcq 24(%rcx), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rcx), %r14
sbbq 8(%rcx), %r15
sbbq 16(%rcx), %rbx
sbbq 24(%rcx), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rdi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rdi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rdi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rdi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rdi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rdi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rdi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rdi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rdi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rdi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rdi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rdi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rdi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rdi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rdi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rdi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq 32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rsi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rsi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rsi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rsi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rsi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rsi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rsi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rsi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rsi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
leaq 64(%rcx), %rcx
leaq 32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
leaq 64(%rdi), %rdi
# Double
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq -64(%rdi), %rdi
# Add-Sub
# Add
movq (%rdi), %r10
movq 8(%rdi), %r11
movq 16(%rdi), %r12
movq 24(%rdi), %r13
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
leaq 96(%rdi), %rsi
leaq 64(%rdi), %rdi
# Add-Sub
# Add
movq (%rdi), %r10
movq 8(%rdi), %r11
movq 16(%rdi), %r12
movq 24(%rdi), %r13
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_add_avx2,.-ge_add_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl ge_sub_avx2
.type ge_sub_avx2,@function
.align 16
ge_sub_avx2:
#else
.section __TEXT,__text
.globl _ge_sub_avx2
.p2align 4
_ge_sub_avx2:
#endif /* __APPLE__ */
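# ge_sub_avx2: identical structure to ge_add_avx2 with the cached
# point's Y+X / Y-X products and the final sum/difference swapped,
# matching the ref10 ge_sub formulas.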
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %rax
subq $24, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rax, 16(%rsp)
leaq 96(%rsi), %rcx
leaq 96(%rax), %rax
leaq 96(%rdi), %rdi
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %rsi, %rcx
leaq 32(%rsi), %rax
leaq -64(%rdi), %rsi
leaq -96(%rdi), %rdi
# Add-Sub
# Add
movq (%rax), %r10
movq 8(%rax), %r11
movq 16(%rax), %r12
movq 24(%rax), %r13
movq %r10, %r14
addq (%rcx), %r10
movq %r11, %r15
adcq 8(%rcx), %r11
movq %r12, %rbx
adcq 16(%rcx), %r12
movq %r13, %rbp
adcq 24(%rcx), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rcx), %r14
sbbq 8(%rcx), %r15
sbbq 16(%rcx), %rbx
sbbq 24(%rcx), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
movq 16(%rsp), %rax
leaq 32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rdi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rdi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rdi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rdi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rdi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rdi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rdi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rdi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rdi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rdi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rdi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rdi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rdi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rdi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rdi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rdi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq -32(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rsi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rsi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rsi), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rsi), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rsi), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rsi), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rsi), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rsi), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rsi), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rsi), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rsi), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rsi), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rsi), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rsi), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
leaq 64(%rcx), %rcx
leaq 64(%rax), %rax
# Multiply
# A[0] * B[0]
movq (%rax), %rdx
mulxq (%rcx), %r10, %r11
# A[2] * B[0]
mulxq 16(%rcx), %r12, %r13
# A[1] * B[0]
mulxq 8(%rcx), %r8, %r9
xorq %rbp, %rbp
adcxq %r8, %r11
# A[3] * B[1]
movq 8(%rax), %rdx
mulxq 24(%rcx), %r14, %r15
adcxq %r9, %r12
# A[0] * B[1]
mulxq (%rcx), %r8, %r9
adoxq %r8, %r11
# A[2] * B[1]
mulxq 16(%rcx), %r8, %rbx
adoxq %r9, %r12
adcxq %r8, %r13
# A[1] * B[2]
movq 16(%rax), %rdx
mulxq 8(%rcx), %r8, %r9
adcxq %rbx, %r14
adoxq %r8, %r13
adcxq %rbp, %r15
adoxq %r9, %r14
# A[0] * B[2]
mulxq (%rcx), %r8, %r9
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %r8, %r12
# A[1] * B[1]
movq 8(%rax), %rdx
mulxq 8(%rcx), %rdx, %r8
adcxq %r9, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r13
mulxq 8(%rcx), %r8, %r9
adcxq %r8, %r14
# A[2] * B[2]
movq 16(%rax), %rdx
mulxq 16(%rcx), %rdx, %r8
adcxq %r9, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%rax), %rdx
adoxq %r8, %r15
mulxq 24(%rcx), %r8, %r9
adoxq %rbp, %rbx
adcxq %r8, %rbx
# A[0] * B[3]
mulxq (%rcx), %rdx, %r8
adcxq %r9, %rbp
xorq %r9, %r9
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rcx), %rdx
adcxq %r8, %r14
mulxq (%rax), %rdx, %r8
adoxq %rdx, %r13
adoxq %r8, %r14
# A[3] * B[2]
movq 24(%rcx), %rdx
mulxq 16(%rax), %rdx, %r8
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%rax), %rdx
adcxq %r8, %rbx
mulxq 16(%rcx), %r8, %rdx
adcxq %r9, %rbp
adoxq %r8, %r15
adoxq %rdx, %rbx
adoxq %r9, %rbp
movq $38, %rdx
mulxq %rbp, %rbp, %r8
addq %rbp, %r13
adcq $0x00, %r8
movq $0x7fffffffffffffff, %r9
shldq $0x01, %r13, %r8
imulq $19, %r8, %r8
andq %r9, %r13
xorq %r9, %r9
adoxq %r8, %r10
mulxq %r14, %r8, %r14
adcxq %r8, %r10
adoxq %r14, %r11
mulxq %r15, %r8, %r15
adcxq %r8, %r11
adoxq %r15, %r12
mulxq %rbx, %r8, %rbx
adcxq %r8, %r12
adoxq %rbx, %r13
adcxq %r9, %r13
# Store
leaq 64(%rdi), %rdi
# Double
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
leaq -64(%rdi), %rdi
# Add-Sub
# Add
movq (%rdi), %r10
movq 8(%rdi), %r11
movq 16(%rdi), %r12
movq 24(%rdi), %r13
movq %r10, %r14
addq (%rsi), %r10
movq %r11, %r15
adcq 8(%rsi), %r11
movq %r12, %rbx
adcq 16(%rsi), %r12
movq %r13, %rbp
adcq 24(%rsi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rsi), %r14
sbbq 8(%rsi), %r15
sbbq 16(%rsi), %rbx
sbbq 24(%rsi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rsi)
movq %r11, 8(%rsi)
movq %r12, 16(%rsi)
movq %r13, 24(%rsi)
movq %r14, (%rdi)
movq %r15, 8(%rdi)
movq %rbx, 16(%rdi)
movq %rbp, 24(%rdi)
leaq 64(%rdi), %rsi
leaq 96(%rdi), %rdi
# Add-Sub
# Add
movq (%rsi), %r10
movq 8(%rsi), %r11
movq 16(%rsi), %r12
movq 24(%rsi), %r13
movq %r10, %r14
addq (%rdi), %r10
movq %r11, %r15
adcq 8(%rdi), %r11
movq %r12, %rbx
adcq 16(%rdi), %r12
movq %r13, %rbp
adcq 24(%rdi), %r13
movq $0x00, %r8
adcq $0x00, %r8
shldq $0x01, %r13, %r8
movq $0x7fffffffffffffff, %r9
imulq $19, %r8
andq %r9, %r13
# Sub modulus (if overflow)
addq %r8, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
# Sub
subq (%rdi), %r14
sbbq 8(%rdi), %r15
sbbq 16(%rdi), %rbx
sbbq 24(%rdi), %rbp
sbbq %r8, %r8
shldq $0x01, %rbp, %r8
imulq $-19, %r8
andq %r9, %rbp
# Add modulus (if underflow)
subq %r8, %r14
sbbq $0x00, %r15
sbbq $0x00, %rbx
sbbq $0x00, %rbp
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, (%rsi)
movq %r15, 8(%rsi)
movq %rbx, 16(%rsi)
movq %rbp, 24(%rsi)
addq $24, %rsp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size ge_sub_avx2,.-ge_sub_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl sc_reduce_avx2
.type sc_reduce_avx2,@function
.align 16
sc_reduce_avx2:
#else
.section __TEXT,__text
.globl _sc_reduce_avx2
.p2align 4
_sc_reduce_avx2:
#endif /* __APPLE__ */
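# sc_reduce_avx2: reduce a 512-bit value modulo the Ed25519 group
# order L = 2^252 + d, where d = 0x14def9dea2f79cd65812631a5cf5d3ed.
# The constants 0xa7ed9ce5a30a2c13 and 0xeb2106215d086329 are the
# two 64-bit words of -d mod 2^128, so multiplying the limbs above
# bit 252 by them and accumulating applies 2^252 = -d (mod L),
# folding the high part into the low part over two passes.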
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
movq %r15, %rax
movq $0xfffffffffffffff, %rcx
shrq $56, %rax
shldq $4, %r14, %r15
shldq $4, %r13, %r14
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rcx, %r11
andq %rcx, %r15
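# The value is now split at bit 252: %r8..%r10 plus the low 60 bits
# of %r11 hold bits 0..251, %r12..%r15 hold bits 252..503, and %rax
# holds bits 504..511 (extracted before the shldq chain).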
# Add order times bits 504..511
subq %rax, %r14
sbbq $0x00, %r15
movq $0xeb2106215d086329, %rdx
mulxq %rax, %rsi, %rcx
movq $0xa7ed9ce5a30a2c13, %rdx
addq %rsi, %r13
mulxq %rax, %rsi, %rbx
adcq $0x00, %rcx
addq %rsi, %r12
adcq %rbx, %r13
adcq %rcx, %r14
adcq $0x00, %r15
# Sub product of top 4 words and order
movq $0xa7ed9ce5a30a2c13, %rdx
mulxq %r12, %rcx, %rax
addq %rcx, %r8
adcq %rax, %r9
mulxq %r14, %rcx, %rax
adcq %rcx, %r10
adcq %rax, %r11
movq $0x00, %rsi
adcq $0x00, %rsi
mulxq %r13, %rcx, %rax
addq %rcx, %r9
adcq %rax, %r10
mulxq %r15, %rcx, %rax
adcq %rcx, %r11
adcq %rax, %rsi
movq $0xeb2106215d086329, %rdx
mulxq %r12, %rcx, %rax
addq %rcx, %r9
adcq %rax, %r10
mulxq %r14, %rcx, %rax
adcq %rcx, %r11
adcq %rax, %rsi
movq $0x00, %rbx
adcq $0x00, %rbx
mulxq %r13, %rcx, %rax
addq %rcx, %r10
adcq %rax, %r11
mulxq %r15, %rcx, %rax
adcq %rcx, %rsi
adcq %rax, %rbx
subq %r12, %r10
movq %rsi, %r12
sbbq %r13, %r11
movq %rbx, %r13
sbbq %r14, %r12
sbbq %r15, %r13
movq %r13, %rax
sarq $57, %rax
# Conditionally add the order, shifted up to bit 125, if the subtraction went negative
movq $0xa000000000000000, %rsi
movq $0xcb024c634b9eba7d, %rbx
movq $0x29bdf3bd45ef39a, %rbp
movq $0x200000000000000, %rcx
andq %rax, %rsi
andq %rax, %rbx
andq %rax, %rbp
andq %rax, %rcx
addq %rsi, %r9
adcq %rbx, %r10
adcq %rbp, %r11
adcq $0x00, %r12
adcq %rcx, %r13
# Move bits 252-376 to own registers
movq $0xfffffffffffffff, %rax
shldq $4, %r12, %r13
shldq $4, %r11, %r12
andq %rax, %r11
# Sub product of top 2 words and order
# * -5812631a5cf5d3ed
movq $0xa7ed9ce5a30a2c13, %rdx
mulxq %r12, %rbp, %rax
movq $0x00, %rsi
addq %rbp, %r8
adcq %rax, %r9
mulxq %r13, %rbp, %rax
adcq $0x00, %rsi
addq %rbp, %r9
adcq %rax, %rsi
# * -14def9dea2f79cd7
movq $0xeb2106215d086329, %rdx
mulxq %r12, %rbp, %rax
movq $0x00, %rbx
addq %rbp, %r9
adcq %rax, %r10
mulxq %r13, %rbp, %rax
adcq $0x00, %rbx
addq %rbp, %r10
adcq %rax, %rbx
# Add overflows at 2 * 64
movq $0xfffffffffffffff, %rcx
andq %rcx, %r11
addq %rsi, %r10
adcq %rbx, %r11
# Subtract top at 2 * 64
subq %r12, %r10
sbbq %r13, %r11
sbbq %rcx, %rcx
# Conditional sub order
movq $0x5812631a5cf5d3ed, %rsi
movq $0x14def9dea2f79cd6, %rbx
movq $0x1000000000000000, %rbp
andq %rcx, %rsi
andq %rcx, %rbx
andq %rcx, %rbp
addq %rsi, %r8
movq $0xfffffffffffffff, %rsi
adcq %rbx, %r9
adcq $0x00, %r10
adcq %rbp, %r11
andq %rsi, %r11
# Store result
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sc_reduce_avx2,.-sc_reduce_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl sc_muladd_avx2
.type sc_muladd_avx2,@function
.align 16
sc_muladd_avx2:
#else
.section __TEXT,__text
.globl _sc_muladd_avx2
.p2align 4
_sc_muladd_avx2:
#endif /* __APPLE__ */
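# sc_muladd_avx2: compute (a*b + c) mod L, the Ed25519 group order,
# as used e.g. for S = (r + H(R,A,M)*k) mod L when signing. The
# product uses the same MULX/ADCX/ADOX 4x4 multiply as above and is
# reduced with the same two-pass folding as sc_reduce_avx2.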
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
movq %rcx, %r9
# Multiply
# A[0] * B[0]
movq (%r8), %rdx
mulxq (%rsi), %r10, %r11
# A[2] * B[0]
mulxq 16(%rsi), %r12, %r13
# A[1] * B[0]
mulxq 8(%rsi), %rax, %rcx
xorq %rbp, %rbp
adcxq %rax, %r11
# A[3] * B[1]
movq 8(%r8), %rdx
mulxq 24(%rsi), %r14, %r15
adcxq %rcx, %r12
# A[0] * B[1]
mulxq (%rsi), %rax, %rcx
adoxq %rax, %r11
# A[2] * B[1]
mulxq 16(%rsi), %rax, %rbx
adoxq %rcx, %r12
adcxq %rax, %r13
# A[1] * B[2]
movq 16(%r8), %rdx
mulxq 8(%rsi), %rax, %rcx
adcxq %rbx, %r14
adoxq %rax, %r13
adcxq %rbp, %r15
adoxq %rcx, %r14
# A[0] * B[2]
mulxq (%rsi), %rax, %rcx
adoxq %rbp, %r15
xorq %rbx, %rbx
adcxq %rax, %r12
# A[1] * B[1]
movq 8(%r8), %rdx
mulxq 8(%rsi), %rdx, %rax
adcxq %rcx, %r13
adoxq %rdx, %r12
# A[1] * B[3]
movq 24(%r8), %rdx
adoxq %rax, %r13
mulxq 8(%rsi), %rax, %rcx
adcxq %rax, %r14
# A[2] * B[2]
movq 16(%r8), %rdx
mulxq 16(%rsi), %rdx, %rax
adcxq %rcx, %r15
adoxq %rdx, %r14
# A[3] * B[3]
movq 24(%r8), %rdx
adoxq %rax, %r15
mulxq 24(%rsi), %rax, %rcx
adoxq %rbp, %rbx
adcxq %rax, %rbx
# A[0] * B[3]
mulxq (%rsi), %rdx, %rax
adcxq %rcx, %rbp
xorq %rcx, %rcx
adcxq %rdx, %r13
# A[3] * B[0]
movq 24(%rsi), %rdx
adcxq %rax, %r14
mulxq (%r8), %rdx, %rax
adoxq %rdx, %r13
adoxq %rax, %r14
# A[3] * B[2]
movq 24(%rsi), %rdx
mulxq 16(%r8), %rdx, %rax
adcxq %rdx, %r15
# A[2] * B[3]
movq 24(%r8), %rdx
adcxq %rax, %rbx
mulxq 16(%rsi), %rax, %rdx
adcxq %rcx, %rbp
adoxq %rax, %r15
adoxq %rdx, %rbx
adoxq %rcx, %rbp
# Add c to a * b
addq (%r9), %r10
adcq 8(%r9), %r11
adcq 16(%r9), %r12
adcq 24(%r9), %r13
adcq $0x00, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
adcq $0x00, %rbp
movq %rbp, %rax
movq $0xfffffffffffffff, %rcx
shrq $56, %rax
shldq $4, %rbx, %rbp
shldq $4, %r15, %rbx
shldq $4, %r14, %r15
shldq $4, %r13, %r14
andq %rcx, %r13
andq %rcx, %rbp
# Add order times bits 504..507
subq %rax, %rbx
sbbq $0x00, %rbp
movq $0xeb2106215d086329, %rdx
mulxq %rax, %rsi, %rcx
movq $0xa7ed9ce5a30a2c13, %rdx
addq %rsi, %r15
mulxq %rax, %rsi, %r8
adcq $0x00, %rcx
addq %rsi, %r14
adcq %r8, %r15
adcq %rcx, %rbx
adcq $0x00, %rbp
# Sub product of top 4 words and order
movq $0xa7ed9ce5a30a2c13, %rdx
mulxq %r14, %rcx, %rax
addq %rcx, %r10
adcq %rax, %r11
mulxq %rbx, %rcx, %rax
adcq %rcx, %r12
adcq %rax, %r13
movq $0x00, %rsi
adcq $0x00, %rsi
mulxq %r15, %rcx, %rax
addq %rcx, %r11
adcq %rax, %r12
mulxq %rbp, %rcx, %rax
adcq %rcx, %r13
adcq %rax, %rsi
movq $0xeb2106215d086329, %rdx
mulxq %r14, %rcx, %rax
addq %rcx, %r11
adcq %rax, %r12
mulxq %rbx, %rcx, %rax
adcq %rcx, %r13
adcq %rax, %rsi
movq $0x00, %r8
adcq $0x00, %r8
mulxq %r15, %rcx, %rax
addq %rcx, %r12
adcq %rax, %r13
mulxq %rbp, %rcx, %rax
adcq %rcx, %rsi
adcq %rax, %r8
subq %r14, %r12
movq %rsi, %r14
sbbq %r15, %r13
movq %r8, %r15
sbbq %rbx, %r14
sbbq %rbp, %r15
movq %r15, %rax
sarq $57, %rax
# Conditionally add the order, shifted up to bit 125, if the subtraction went negative
movq $0xa000000000000000, %rsi
movq $0xcb024c634b9eba7d, %r8
movq $0x29bdf3bd45ef39a, %r9
movq $0x200000000000000, %rcx
andq %rax, %rsi
andq %rax, %r8
andq %rax, %r9
andq %rax, %rcx
addq %rsi, %r11
adcq %r8, %r12
adcq %r9, %r13
adcq $0x00, %r14
adcq %rcx, %r15
# Move bits 252-376 to own registers
movq $0xfffffffffffffff, %rax
shldq $4, %r14, %r15
shldq $4, %r13, %r14
andq %rax, %r13
# Sub product of top 2 words and order
# * -5812631a5cf5d3ed
movq $0xa7ed9ce5a30a2c13, %rdx
mulxq %r14, %r9, %rax
movq $0x00, %rsi
addq %r9, %r10
adcq %rax, %r11
mulxq %r15, %r9, %rax
adcq $0x00, %rsi
addq %r9, %r11
adcq %rax, %rsi
# * -14def9dea2f79cd7
movq $0xeb2106215d086329, %rdx
mulxq %r14, %r9, %rax
movq $0x00, %r8
addq %r9, %r11
adcq %rax, %r12
mulxq %r15, %r9, %rax
adcq $0x00, %r8
addq %r9, %r12
adcq %rax, %r8
# Add overflows at 2 * 64
movq $0xfffffffffffffff, %rcx
andq %rcx, %r13
addq %rsi, %r12
adcq %r8, %r13
# Subtract top at 2 * 64
subq %r14, %r12
sbbq %r15, %r13
sbbq %rcx, %rcx
# Conditional sub order
movq $0x5812631a5cf5d3ed, %rsi
movq $0x14def9dea2f79cd6, %r8
movq $0x1000000000000000, %r9
andq %rcx, %rsi
andq %rcx, %r8
andq %rcx, %r9
addq %rsi, %r10
movq $0xfffffffffffffff, %rsi
adcq %r8, %r11
adcq $0x00, %r12
adcq %r9, %r13
andq %rsi, %r13
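        # r10..r13 should now hold the canonical result (< L) in four
        # 64-bit limbs.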
# Store result
movq %r10, (%rdi)
movq %r11, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sc_muladd_avx2,.-sc_muladd_avx2
#endif /* __APPLE__ */
#endif /* HAVE_ED25519 */
#endif /* HAVE_INTEL_AVX2 */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aerror2/erfly6
| 9,864
|
Sources/I6/SEGGER_THUMB_Startup.s
|
/*********************************************************************
* SEGGER Microcontroller GmbH *
* The Embedded Experts *
**********************************************************************
* *
* (c) 2014 - 2020 SEGGER Microcontroller GmbH *
* *
* www.segger.com Support: support@segger.com *
* *
**********************************************************************
* *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or *
* without modification, are permitted provided that the following *
* condition is met: *
* *
* - Redistributions of source code must retain the above copyright *
* notice, this condition and the following disclaimer. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
* DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR *
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR *
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; *
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE *
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH *
* DAMAGE. *
* *
**********************************************************************
-------------------------- END-OF-HEADER -----------------------------
File : SEGGER_THUMB_Startup.s
Purpose : Generic runtime init startup code for ARM CPUs running
in THUMB mode.
Designed to work with the SEGGER linker to produce
smallest possible executables.
This file does not normally require any customization.
Additional information:
Preprocessor Definitions
FULL_LIBRARY
If defined then
- argc, argv are set up by calling SEGGER_SEMIHOST_GetArgs().
- the exit symbol is defined and executes on return from main.
- the exit symbol calls destructors, atexit functions and then
calls SEGGER_SEMIHOST_Exit().
If not defined then
- argc and argv are not valid (main is assumed to not take parameters)
- the exit symbol is defined, executes on return from main and
halts in a loop.
*/
.syntax unified
/*********************************************************************
*
* Defines, configurable
*
**********************************************************************
*/
#ifndef APP_ENTRY_POINT
#define APP_ENTRY_POINT main
#endif
#ifndef ARGSSPACE
#define ARGSSPACE 128
#endif
/*********************************************************************
*
* Macros
*
**********************************************************************
*/
//
// Declare a label as function symbol (without switching sections)
//
.macro MARK_FUNC Name
.global \Name
.thumb_func
.code 16
\Name:
.endm
//
// Declare a regular function.
// Functions from the startup are placed in the init section.
//
.macro START_FUNC Name
.section .init.\Name, "ax"
.global \Name
.balign 2
.thumb_func
.code 16
\Name:
.endm
//
// Declare a weak function
//
.macro WEAK_FUNC Name
.section .init.\Name, "ax", %progbits
.global \Name
.weak \Name
.balign 2
.thumb_func
.code 16
\Name:
.endm
//
// Mark the end of a function and calculate its size
//
.macro END_FUNC name
.size \name,.-\name
.endm
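//
// For reference, "START_FUNC _start" expands to roughly:
//   .section .init._start, "ax"
//   .global _start
//   .balign 2
//   .thumb_func
//   .code 16
//   _start:
//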
/*********************************************************************
*
* Externals
*
**********************************************************************
*/
.extern APP_ENTRY_POINT // typically main
/*********************************************************************
*
* Global functions
*
**********************************************************************
*/
/*********************************************************************
*
* _start
*
* Function description
* Entry point for the startup code.
* Usually called by the reset handler.
* Performs all initialisation, based on the entries in the
* linker-generated init table, then calls main().
* It is device independent, so there should not be any need for an
* end-user to modify it.
*
* Additional information
* At this point, the stack pointer should already have been
* initialized
* - by hardware (such as on Cortex-M),
* - by the device-specific reset handler,
* - or by the debugger (such as for RAM Code).
*/
#undef L
#define L(label) .L_start_##label
START_FUNC _start
//
// Call linker init functions, which in turn perform the following:
// * Perform segment init
// * Perform heap init (if used)
// * Call constructors of global objects (if any exist)
//
ldr R4, =__SEGGER_init_table__ // Set table pointer to start of initialization table
L(RunInit):
ldr R0, [R4] // Get next initialization function from table
adds R4, R4, #4 // Increment table pointer to point to function arguments
blx R0 // Call initialization function
b L(RunInit)
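//
// Note: the loop has no explicit exit; the init table is expected to
// end with the address of __SEGGER_init_done below, so the final blx
// lands there and execution continues with the call to main().
//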
//
MARK_FUNC __SEGGER_init_done
//
// Time to call main(), the application entry point.
//
#ifndef FULL_LIBRARY
//
// In a real embedded application ("Free-standing environment"),
// main() does not get any arguments,
// which means it is not necessary to init R0 and R1.
//
bl APP_ENTRY_POINT // Call to application entry point (usually main())
END_FUNC _start
//
// end of _start
// Fall-through to exit if main ever returns.
//
MARK_FUNC exit
//
// In a free-standing environment, if returned from application:
// Loop forever.
//
b .
.size exit,.-exit
#else
//
// In a hosted environment,
// we need to load R0 and R1 with argc and argv, in order to handle
// the command line arguments.
// This is required for some programs running under control of a
// debugger, such as automated tests.
//
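// Roughly, in C terms (a sketch; the exact SEGGER_SEMIHOST_GetArgs
// signature is an assumption, inferred from the register usage below):
//   int argc = SEGGER_SEMIHOST_GetArgs(ARGSSPACE, __SEGGER_init_arg_data);
//   exit(main(argc, (char **)__SEGGER_init_arg_data));
//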
movs R0, #ARGSSPACE
ldr R1, =__SEGGER_init_arg_data
bl SEGGER_SEMIHOST_GetArgs
ldr R1, =__SEGGER_init_arg_data
bl APP_ENTRY_POINT // Call to application entry point (usually main())
bl exit // Call exit function
b . // If we unexpectedly return from exit, hang.
END_FUNC _start
#endif
//
#ifdef FULL_LIBRARY
/*********************************************************************
*
* exit
*
* Function description
* Exit of the system.
* Called on return from application entry point or explicit call
* to exit.
*
* Additional information
* In a hosted environment exit gracefully, by
* saving the return value,
*    calling destructors of global objects,
* calling registered atexit functions,
* and notifying the host/debugger.
*/
#undef L
#define L(label) .L_exit_##label
WEAK_FUNC exit
mov R5, R0 // Save the exit parameter/return result
//
// Call destructors
//
ldr R0, =__dtors_start__ // Pointer to destructor list
ldr R1, =__dtors_end__
L(Loop):
cmp R0, R1
beq L(End) // Reached end of destructor list? => Done
ldr R2, [R0] // Load current destructor address into R2
adds R0, R0, #4 // Increment pointer
push {R0-R1} // Save R0 and R1
blx R2 // Call destructor
pop {R0-R1} // Restore R0 and R1
b L(Loop)
L(End):
//
// Call atexit functions
//
bl __SEGGER_RTL_execute_at_exit_fns
//
// Call debug_exit with return result/exit parameter
//
mov R0, R5
bl SEGGER_SEMIHOST_Exit
//
// If execution is not terminated, loop forever
//
L(ExitLoop):
b L(ExitLoop) // Loop forever.
END_FUNC exit
#endif
#ifdef FULL_LIBRARY
.bss
__SEGGER_init_arg_data:
.space ARGSSPACE
.size __SEGGER_init_arg_data, .-__SEGGER_init_arg_data
.type __SEGGER_init_arg_data, %object
#endif
/*************************** End of file ****************************/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_WakeUpFromStop/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;*                      After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size       EQU     0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
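                ; On return the ARM C library expects R0 = heap base,
                ; R1 = stack base (initial SP), R2 = heap limit and
                ; R3 = stack limit (two-region memory model).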
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aerror2/erfly6
| 9,864
|
Sources/I6X/SEGGER_THUMB_Startup.s
|
/*********************************************************************
* SEGGER Microcontroller GmbH *
* The Embedded Experts *
**********************************************************************
* *
* (c) 2014 - 2020 SEGGER Microcontroller GmbH *
* *
* www.segger.com Support: support@segger.com *
* *
**********************************************************************
* *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or *
* without modification, are permitted provided that the following *
* condition is met: *
* *
* - Redistributions of source code must retain the above copyright *
* notice, this condition and the following disclaimer. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND *
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, *
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
* DISCLAIMED. IN NO EVENT SHALL SEGGER Microcontroller BE LIABLE FOR *
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR *
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; *
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE *
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH *
* DAMAGE. *
* *
**********************************************************************
-------------------------- END-OF-HEADER -----------------------------
File : SEGGER_THUMB_Startup.s
Purpose : Generic runtime init startup code for ARM CPUs running
in THUMB mode.
Designed to work with the SEGGER linker to produce
smallest possible executables.
This file does not normally require any customization.
Additional information:
Preprocessor Definitions
FULL_LIBRARY
If defined then
- argc, argv are set up by calling SEGGER_SEMIHOST_GetArgs().
- the exit symbol is defined and executes on return from main.
- the exit symbol calls destructors, atexit functions and then
calls SEGGER_SEMIHOST_Exit().
If not defined then
- argc and argv are not valid (main is assumed to not take parameters)
- the exit symbol is defined, executes on return from main and
halts in a loop.
*/
.syntax unified
/*********************************************************************
*
* Defines, configurable
*
**********************************************************************
*/
#ifndef APP_ENTRY_POINT
#define APP_ENTRY_POINT main
#endif
#ifndef ARGSSPACE
#define ARGSSPACE 128
#endif
/*********************************************************************
*
* Macros
*
**********************************************************************
*/
//
// Declare a label as function symbol (without switching sections)
//
.macro MARK_FUNC Name
.global \Name
.thumb_func
.code 16
\Name:
.endm
//
// Declare a regular function.
// Functions from the startup are placed in the init section.
//
.macro START_FUNC Name
.section .init.\Name, "ax"
.global \Name
.balign 2
.thumb_func
.code 16
\Name:
.endm
//
// Declare a weak function
//
.macro WEAK_FUNC Name
.section .init.\Name, "ax", %progbits
.global \Name
.weak \Name
.balign 2
.thumb_func
.code 16
\Name:
.endm
//
// Mark the end of a function and calculate its size
//
.macro END_FUNC name
.size \name,.-\name
.endm
/*********************************************************************
*
* Externals
*
**********************************************************************
*/
.extern APP_ENTRY_POINT // typically main
/*********************************************************************
*
* Global functions
*
**********************************************************************
*/
/*********************************************************************
*
* _start
*
* Function description
* Entry point for the startup code.
* Usually called by the reset handler.
* Performs all initialisation, based on the entries in the
* linker-generated init table, then calls main().
* It is device independent, so there should not be any need for an
* end-user to modify it.
*
* Additional information
* At this point, the stack pointer should already have been
* initialized
* - by hardware (such as on Cortex-M),
* - by the device-specific reset handler,
* - or by the debugger (such as for RAM Code).
*/
#undef L
#define L(label) .L_start_##label
START_FUNC _start
//
// Call linker init functions, which in turn perform the following:
// * Perform segment init
// * Perform heap init (if used)
// * Call constructors of global objects (if any exist)
//
ldr R4, =__SEGGER_init_table__ // Set table pointer to start of initialization table
L(RunInit):
ldr R0, [R4] // Get next initialization function from table
adds R4, R4, #4 // Increment table pointer to point to function arguments
blx R0 // Call initialization function
b L(RunInit)
//
MARK_FUNC __SEGGER_init_done
//
// Time to call main(), the application entry point.
//
#ifndef FULL_LIBRARY
//
// In a real embedded application ("Free-standing environment"),
// main() does not get any arguments,
// which means it is not necessary to init R0 and R1.
//
bl APP_ENTRY_POINT // Call to application entry point (usually main())
END_FUNC _start
//
// end of _start
// Fall-through to exit if main ever returns.
//
MARK_FUNC exit
//
// In a free-standing environment, if returned from application:
// Loop forever.
//
b .
.size exit,.-exit
#else
//
// In a hosted environment,
// we need to load R0 and R1 with argc and argv, in order to handle
// the command line arguments.
// This is required for some programs running under control of a
// debugger, such as automated tests.
//
movs R0, #ARGSSPACE
ldr R1, =__SEGGER_init_arg_data
bl SEGGER_SEMIHOST_GetArgs
ldr R1, =__SEGGER_init_arg_data
bl APP_ENTRY_POINT // Call to application entry point (usually main())
bl exit // Call exit function
b . // If we unexpectedly return from exit, hang.
END_FUNC _start
#endif
//
#ifdef FULL_LIBRARY
/*********************************************************************
*
* exit
*
* Function description
* Exit of the system.
* Called on return from application entry point or explicit call
* to exit.
*
* Additional information
* In a hosted environment exit gracefully, by
* saving the return value,
*    calling destructors of global objects,
* calling registered atexit functions,
* and notifying the host/debugger.
*/
#undef L
#define L(label) .L_exit_##label
WEAK_FUNC exit
mov R5, R0 // Save the exit parameter/return result
//
// Call destructors
//
ldr R0, =__dtors_start__ // Pointer to destructor list
ldr R1, =__dtors_end__
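//
// __dtors_start__ and __dtors_end__ delimit a linker-emitted array of
// destructor function pointers. R0/R1 are saved around each call since
// the AAPCS allows the callee to clobber them.
//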
L(Loop):
cmp R0, R1
beq L(End) // Reached end of destructor list? => Done
ldr R2, [R0] // Load current destructor address into R2
adds R0, R0, #4 // Increment pointer
push {R0-R1} // Save R0 and R1
blx R2 // Call destructor
pop {R0-R1} // Restore R0 and R1
b L(Loop)
L(End):
//
// Call atexit functions
//
bl __SEGGER_RTL_execute_at_exit_fns
//
// Call debug_exit with return result/exit parameter
//
mov R0, R5
bl SEGGER_SEMIHOST_Exit
//
// If execution is not terminated, loop forever
//
L(ExitLoop):
b L(ExitLoop) // Loop forever.
END_FUNC exit
#endif
#ifdef FULL_LIBRARY
.bss
__SEGGER_init_arg_data:
.space ARGSSPACE
.size __SEGGER_init_arg_data, .-__SEGGER_init_arg_data
.type __SEGGER_init_arg_data, %object
#endif
/*************************** End of file ****************************/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_WakeUpFromStop/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
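/* The loop above copies the .data initializers one 32-bit word at a
   time from flash (_sidata) to SRAM (_sdata), using r1 as the running
   byte offset, until _edata is reached. */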
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_WakeUpFromStop/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
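        ; sfe(CSTACK) is the section-end address of CSTACK, used as
        ; the initial stack pointer (vector 0)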
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
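        ; SystemInit sets up the clock system; __iar_program_start then
        ; performs the IAR C runtime initialization and calls main()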
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartComIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;*                      After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
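; 16 system exception entries + 32 device interrupt entries at 4 bytes
; each, so __Vectors_Size evaluates to 0xC0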
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartComIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
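/* The loop above zero-fills .bss one 32-bit word at a time, from _sbss
   up to (but not including) _ebss. */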
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_RestartComIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
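; Reset: call SystemInit (clock/system configuration), then branch to the
; IAR C runtime entry __iar_program_start, which initializes data/bss and
; eventually calls main().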
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 2,105,475
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/sp_x86_64_asm.S
|
/* sp_x86_64_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_SP_X86_64_ASM
#ifndef WOLFSSL_SP_NO_2048
#ifndef WOLFSSL_SP_NO_2048
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
* size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
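/* A portable C sketch of the same conversion (an illustration only; the
* 64-bit limb view of r and the parameter names are assumptions, not the
* wolfSSL API). Big endian input keeps its least significant byte at
* a[n - 1], so limbs are filled least significant first from the end of
* the array:
*
*     #include <stddef.h>
*     #include <stdint.h>
*
*     void from_bin(uint64_t* r, size_t limbs, const uint8_t* a, size_t n)
*     {
*         size_t i;
*         for (i = 0; i < limbs; i++)
*             r[i] = 0;
*         for (i = 0; i < n; i++)
*             r[i / 8] |= (uint64_t)a[n - 1 - i] << (8 * (i % 8));
*     }
*
* The assembly below computes the same result with bswap'd 64-byte and
* 8-byte block copies, a byte-granular tail loop, and a zero fill of the
* remaining limbs (the limb count is hard-coded to 32).
*/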
#ifndef __APPLE__
.text
.globl sp_2048_from_bin_bswap
.type sp_2048_from_bin_bswap,@function
.align 16
sp_2048_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_2048_from_bin_bswap
.p2align 4
_sp_2048_from_bin_bswap:
#endif /* __APPLE__ */
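# System V AMD64 arguments: rdi = r, rsi = size, rdx = a, rcx = n.
# (size is effectively unused: the limb count is hard-coded via the
# 0x100-byte end bound computed into r10 below.)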
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x100, %r10
xorq %r11, %r11
jmp L_2048_from_bin_bswap_64_end
L_2048_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_2048_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_2048_from_bin_bswap_64_start
jmp L_2048_from_bin_bswap_8_end
L_2048_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_2048_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_2048_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_2048_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_2048_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_2048_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_2048_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_2048_from_bin_bswap_zero_end
L_2048_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_2048_from_bin_bswap_zero_start
L_2048_from_bin_bswap_zero_end:
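# "repz retq" (rep ret) instead of a bare ret: a two-byte return form
# historically used to avoid a branch-predictor penalty on some AMD cores.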
repz retq
#ifndef __APPLE__
.size sp_2048_from_bin_bswap,.-sp_2048_from_bin_bswap
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
* Uses the movbe instruction, which is an optional extension.
*
* r A single precision integer.
* size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
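/* Since movbe is an optional extension, a caller is expected to choose
* between the bswap and movbe variants at run time. A minimal detection
* sketch for GCC/Clang (CPUID.01H:ECX bit 22 reports MOVBE; this helper is
* an illustration, not the wolfSSL dispatch code):
*
*     #include <cpuid.h>
*
*     static int have_movbe(void)
*     {
*         unsigned int eax, ebx, ecx, edx;
*         if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
*             return 0;
*         return (ecx & bit_MOVBE) != 0;
*     }
*/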
#ifndef __APPLE__
.text
.globl sp_2048_from_bin_movbe
.type sp_2048_from_bin_movbe,@function
.align 16
sp_2048_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_2048_from_bin_movbe
.p2align 4
_sp_2048_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x100, %r10
jmp L_2048_from_bin_movbe_64_end
L_2048_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_2048_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_2048_from_bin_movbe_64_start
jmp L_2048_from_bin_movbe_8_end
L_2048_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_2048_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_2048_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_2048_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_2048_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_2048_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_2048_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_2048_from_bin_movbe_zero_end
L_2048_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_2048_from_bin_movbe_zero_start
L_2048_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_2048_from_bin_movbe,.-sp_2048_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
* Fixed number of bytes written: 256.
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
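/* Equivalent portable C (an illustrative sketch; the function name and
* parameter types are hypothetical): emit the 32 limbs most significant
* first, byte-swapping each so the array ends up big endian:
*
*     #include <stdint.h>
*
*     void to_bin_256(const uint64_t r[32], uint8_t a[256])
*     {
*         for (int i = 0; i < 32; i++) {
*             uint64_t w = r[31 - i];
*             for (int j = 0; j < 8; j++)
*                 a[8 * i + j] = (uint8_t)(w >> (56 - 8 * j));
*         }
*     }
*/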
#ifndef __APPLE__
.text
.globl sp_2048_to_bin_bswap_32
.type sp_2048_to_bin_bswap_32,@function
.align 16
sp_2048_to_bin_bswap_32:
#else
.section __TEXT,__text
.globl _sp_2048_to_bin_bswap_32
.p2align 4
_sp_2048_to_bin_bswap_32:
#endif /* __APPLE__ */
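# System V AMD64 arguments: rdi = r (32 little-endian limbs),
# rsi = a (256-byte big endian output buffer).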
movq 248(%rdi), %rdx
movq 240(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movq 232(%rdi), %rdx
movq 224(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movq 216(%rdi), %rdx
movq 208(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movq 200(%rdi), %rdx
movq 192(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movq 184(%rdi), %rdx
movq 176(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movq 168(%rdi), %rdx
movq 160(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movq 152(%rdi), %rdx
movq 144(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movq 136(%rdi), %rdx
movq 128(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movq 120(%rdi), %rdx
movq 112(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movq 104(%rdi), %rdx
movq 96(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movq 88(%rdi), %rdx
movq 80(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movq 72(%rdi), %rdx
movq 64(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movq 56(%rdi), %rdx
movq 48(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movq 40(%rdi), %rdx
movq 32(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
repz retq
#ifndef __APPLE__
.size sp_2048_to_bin_bswap_32,.-sp_2048_to_bin_bswap_32
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
* Fixed number of bytes written: 256.
* Uses the movbe instruction, which is optional.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_2048_to_bin_movbe_32
.type sp_2048_to_bin_movbe_32,@function
.align 16
sp_2048_to_bin_movbe_32:
#else
.section __TEXT,__text
.globl _sp_2048_to_bin_movbe_32
.p2align 4
_sp_2048_to_bin_movbe_32:
#endif /* __APPLE__ */
movbeq 248(%rdi), %rdx
movbeq 240(%rdi), %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movbeq 232(%rdi), %rdx
movbeq 224(%rdi), %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movbeq 216(%rdi), %rdx
movbeq 208(%rdi), %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movbeq 200(%rdi), %rdx
movbeq 192(%rdi), %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movbeq 184(%rdi), %rdx
movbeq 176(%rdi), %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movbeq 168(%rdi), %rdx
movbeq 160(%rdi), %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movbeq 152(%rdi), %rdx
movbeq 144(%rdi), %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movbeq 136(%rdi), %rdx
movbeq 128(%rdi), %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movbeq 120(%rdi), %rdx
movbeq 112(%rdi), %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movbeq 104(%rdi), %rdx
movbeq 96(%rdi), %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movbeq 88(%rdi), %rdx
movbeq 80(%rdi), %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movbeq 72(%rdi), %rdx
movbeq 64(%rdi), %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movbeq 56(%rdi), %rdx
movbeq 48(%rdi), %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movbeq 40(%rdi), %rdx
movbeq 32(%rdi), %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
repz retq
#ifndef __APPLE__
.size sp_2048_to_bin_movbe_32,.-sp_2048_to_bin_movbe_32
#endif /* __APPLE__ */
#endif /* NO_MOVBE_SUPPORT */
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
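/* The unrolled code below is a product-scanning ("Comba") multiplication:
* each output column k sums every partial product A[i] * B[k - i] into a
* three-word accumulator (r8/r9/r10 rotate through the low, high and carry
* roles), stores the low word, and shifts the accumulator down 64 bits.
* A compact C sketch of the same scheme (an illustration only, assuming a
* compiler with unsigned __int128 such as GCC or Clang):
*
*     #include <stdint.h>
*
*     void mul_16(uint64_t r[32], const uint64_t a[16], const uint64_t b[16])
*     {
*         unsigned __int128 acc = 0;
*         uint64_t over = 0;                // third accumulator word
*         for (int k = 0; k < 31; k++) {
*             int i0 = (k < 16) ? 0 : k - 15;
*             int i1 = (k < 16) ? k : 15;
*             for (int i = i0; i <= i1; i++) {
*                 unsigned __int128 p = (unsigned __int128)a[i] * b[k - i];
*                 acc += p;
*                 if (acc < p)              // wrapped past 2^128
*                     over++;
*             }
*             r[k] = (uint64_t)acc;
*             acc = (acc >> 64) | ((unsigned __int128)over << 64);
*             over = 0;
*         }
*         r[31] = (uint64_t)acc;
*     }
*
* The low 16 result words are accumulated in stack scratch space and copied
* to r at the end so that r may alias a or b.
*/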
#ifndef __APPLE__
.text
.globl sp_2048_mul_16
.type sp_2048_mul_16,@function
.align 16
sp_2048_mul_16:
#else
.section __TEXT,__text
.globl _sp_2048_mul_16
.p2align 4
_sp_2048_mul_16:
#endif /* __APPLE__ */
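# System V AMD64 arguments: rdi = r, rsi = a, rdx = b. b is moved to rcx
# first because the widening mulq below implicitly uses rdx:rax for its
# 128-bit result.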
movq %rdx, %rcx
subq $0x80, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rsp)
# A[0] * B[6]
movq 48(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[0]
movq (%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 48(%rsp)
# A[0] * B[7]
movq 56(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[6]
movq 48(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[1]
movq 8(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[0]
movq (%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 56(%rsp)
# A[0] * B[8]
movq 64(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[7]
movq 56(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[6]
movq 48(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[2]
movq 16(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[1]
movq 8(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[0]
movq (%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 64(%rsp)
# A[0] * B[9]
movq 72(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[8]
movq 64(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[7]
movq 56(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[6]
movq 48(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[3]
movq 24(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[2]
movq 16(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[1]
movq 8(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[0]
movq (%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 72(%rsp)
# A[0] * B[10]
movq 80(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[9]
movq 72(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[8]
movq 64(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[7]
movq 56(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[6]
movq 48(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[4]
movq 32(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[3]
movq 24(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[2]
movq 16(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[1]
movq 8(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[0]
movq (%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 80(%rsp)
# A[0] * B[11]
movq 88(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[10]
movq 80(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[9]
movq 72(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[8]
movq 64(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[7]
movq 56(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[6]
movq 48(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[5]
movq 40(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[4]
movq 32(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[3]
movq 24(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[2]
movq 16(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[1]
movq 8(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[0]
movq (%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 88(%rsp)
# A[0] * B[12]
movq 96(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[11]
movq 88(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[10]
movq 80(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[9]
movq 72(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[8]
movq 64(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[7]
movq 56(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[6]
movq 48(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[5]
movq 40(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[4]
movq 32(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[3]
movq 24(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[2]
movq 16(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[1]
movq 8(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[0]
movq (%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 96(%rsp)
# A[0] * B[13]
movq 104(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[12]
movq 96(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[11]
movq 88(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[10]
movq 80(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[9]
movq 72(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[8]
movq 64(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[7]
movq 56(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[6]
movq 48(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[5]
movq 40(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[4]
movq 32(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[3]
movq 24(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[2]
movq 16(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[1]
movq 8(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[0]
movq (%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 104(%rsp)
# A[0] * B[14]
movq 112(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[13]
movq 104(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[12]
movq 96(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[11]
movq 88(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[10]
movq 80(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[9]
movq 72(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[8]
movq 64(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[7]
movq 56(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[6]
movq 48(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[5]
movq 40(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[4]
movq 32(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[3]
movq 24(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[2]
movq 16(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[1]
movq 8(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[0]
movq (%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 112(%rsp)
# A[0] * B[15]
movq 120(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[14]
movq 112(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[13]
movq 104(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[12]
movq 96(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[11]
movq 88(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[10]
movq 80(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[9]
movq 72(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[8]
movq 64(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[7]
movq 56(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[6]
movq 48(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[5]
movq 40(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[4]
movq 32(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[3]
movq 24(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[2]
movq 16(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[1]
movq 8(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[0]
movq (%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 120(%rsp)
# A[1] * B[15]
movq 120(%rcx), %rax
mulq 8(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[14]
movq 112(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[13]
movq 104(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[12]
movq 96(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[11]
movq 88(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[10]
movq 80(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[9]
movq 72(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[8]
movq 64(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[7]
movq 56(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[6]
movq 48(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[5]
movq 40(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[4]
movq 32(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[3]
movq 24(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[2]
movq 16(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[1]
movq 8(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 128(%rdi)
# A[2] * B[15]
movq 120(%rcx), %rax
mulq 16(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[14]
movq 112(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[13]
movq 104(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[12]
movq 96(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[11]
movq 88(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[10]
movq 80(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[9]
movq 72(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[8]
movq 64(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[7]
movq 56(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[6]
movq 48(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[5]
movq 40(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[4]
movq 32(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[3]
movq 24(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[2]
movq 16(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 136(%rdi)
# A[3] * B[15]
movq 120(%rcx), %rax
mulq 24(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[14]
movq 112(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[13]
movq 104(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[12]
movq 96(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[11]
movq 88(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[10]
movq 80(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[9]
movq 72(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[8]
movq 64(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[7]
movq 56(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[6]
movq 48(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[5]
movq 40(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[4]
movq 32(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[3]
movq 24(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 144(%rdi)
# A[4] * B[15]
movq 120(%rcx), %rax
mulq 32(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[14]
movq 112(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[13]
movq 104(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[12]
movq 96(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[11]
movq 88(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[10]
movq 80(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[9]
movq 72(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[8]
movq 64(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[7]
movq 56(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[6]
movq 48(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[5]
movq 40(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[4]
movq 32(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 152(%rdi)
# A[5] * B[15]
movq 120(%rcx), %rax
mulq 40(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[14]
movq 112(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[13]
movq 104(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[12]
movq 96(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[11]
movq 88(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[10]
movq 80(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[9]
movq 72(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[8]
movq 64(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[7]
movq 56(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[6]
movq 48(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[5]
movq 40(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 160(%rdi)
# A[6] * B[15]
movq 120(%rcx), %rax
mulq 48(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[14]
movq 112(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[13]
movq 104(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[12]
movq 96(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[11]
movq 88(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[10]
movq 80(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[9]
movq 72(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[8]
movq 64(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[7]
movq 56(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[6]
movq 48(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 168(%rdi)
# A[7] * B[15]
movq 120(%rcx), %rax
mulq 56(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[14]
movq 112(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[13]
movq 104(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[12]
movq 96(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[11]
movq 88(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[10]
movq 80(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[9]
movq 72(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[8]
movq 64(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[7]
movq 56(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 176(%rdi)
# A[8] * B[15]
movq 120(%rcx), %rax
mulq 64(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[14]
movq 112(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[13]
movq 104(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[12]
movq 96(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[11]
movq 88(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[10]
movq 80(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[9]
movq 72(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[8]
movq 64(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 184(%rdi)
# A[9] * B[15]
movq 120(%rcx), %rax
mulq 72(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[14]
movq 112(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[13]
movq 104(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[12]
movq 96(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[11]
movq 88(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[10]
movq 80(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[9]
movq 72(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 192(%rdi)
# A[10] * B[15]
movq 120(%rcx), %rax
mulq 80(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[14]
movq 112(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[13]
movq 104(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[12]
movq 96(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[11]
movq 88(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[10]
movq 80(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 200(%rdi)
# A[11] * B[15]
movq 120(%rcx), %rax
mulq 88(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[14]
movq 112(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[13]
movq 104(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[12]
movq 96(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[11]
movq 88(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 208(%rdi)
# A[12] * B[15]
movq 120(%rcx), %rax
mulq 96(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[14]
movq 112(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[13]
movq 104(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[12]
movq 96(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 216(%rdi)
# A[13] * B[15]
movq 120(%rcx), %rax
mulq 104(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[14]
movq 112(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[13]
movq 104(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 224(%rdi)
# A[14] * B[15]
movq 120(%rcx), %rax
mulq 112(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[14]
movq 112(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 232(%rdi)
# A[15] * B[15]
movq 120(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r8
movq 56(%rsp), %r9
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r8
movq 88(%rsp), %r9
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsp), %rax
movq 104(%rsp), %rdx
movq 112(%rsp), %r8
movq 120(%rsp), %r9
movq %rax, 96(%rdi)
movq %rdx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_mul_16,.-sp_2048_mul_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
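/* This variant is operand scanning built on BMI2/ADX: mulx yields a full
* 128-bit product without touching flags, and adcx/adox maintain two
* independent carry chains (CF and OF) so neighbouring additions need not
* serialize on a single flag. A single-carry-chain C sketch of what one
* row computes (an illustration using immintrin.h intrinsics, compiled
* with -mbmi2; the code below interleaves two chains per row for
* throughput):
*
*     #include <immintrin.h>
*
*     // r[0..15] += a0 * b[0..15]; assumes r[16] is zero on entry and
*     // receives the row's carry-out.
*     static void muladd_row(unsigned long long r[17], unsigned long long a0,
*                            const unsigned long long b[16])
*     {
*         unsigned long long hi, lo, carry = 0;
*         for (int j = 0; j < 16; j++) {
*             lo = _mulx_u64(a0, b[j], &hi);
*             hi += _addcarry_u64(0, lo, carry, &lo);
*             hi += _addcarry_u64(0, lo, r[j], &r[j]);
*             carry = hi;
*         }
*         r[16] = carry;
*     }
*/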
#ifndef __APPLE__
.text
.globl sp_2048_mul_avx2_16
.type sp_2048_mul_avx2_16,@function
.align 16
sp_2048_mul_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_mul_avx2_16
.p2align 4
_sp_2048_mul_avx2_16:
#endif /* __APPLE__ */
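# System V AMD64 arguments: rdi = r, rsi = a, rdx = b. b moves to rbp
# because rdx is the implicit source operand of mulx, and the 128-byte
# stack scratch area receives the low half of the result whenever r
# aliases a or b (see the cmov selection of rbx below).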
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
movq %rdx, %rbp
subq $0x80, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbx
cmovne %rdi, %rbx
cmpq %rdi, %rbp
cmove %rsp, %rbx
addq $0x80, %rdi
xorq %r14, %r14
movq (%rsi), %rdx
# A[0] * B[0]
mulx (%rbp), %r8, %r9
# A[0] * B[1]
mulx 8(%rbp), %rax, %r10
movq %r8, (%rbx)
adcxq %rax, %r9
# A[0] * B[2]
mulx 16(%rbp), %rax, %r11
movq %r9, 8(%rbx)
adcxq %rax, %r10
# A[0] * B[3]
mulx 24(%rbp), %rax, %r12
movq %r10, 16(%rbx)
adcxq %rax, %r11
movq %r11, 24(%rbx)
# A[0] * B[4]
mulx 32(%rbp), %rax, %r8
adcxq %rax, %r12
# A[0] * B[5]
mulx 40(%rbp), %rax, %r9
movq %r12, 32(%rbx)
adcxq %rax, %r8
# A[0] * B[6]
mulx 48(%rbp), %rax, %r10
movq %r8, 40(%rbx)
adcxq %rax, %r9
# A[0] * B[7]
mulx 56(%rbp), %rax, %r11
movq %r9, 48(%rbx)
adcxq %rax, %r10
movq %r10, 56(%rbx)
# A[0] * B[8]
mulx 64(%rbp), %rax, %r12
adcxq %rax, %r11
# A[0] * B[9]
mulx 72(%rbp), %rax, %r8
movq %r11, 64(%rbx)
adcxq %rax, %r12
# A[0] * B[10]
mulx 80(%rbp), %rax, %r9
movq %r12, 72(%rbx)
adcxq %rax, %r8
# A[0] * B[11]
mulx 88(%rbp), %rax, %r10
movq %r8, 80(%rbx)
adcxq %rax, %r9
movq %r9, 88(%rbx)
# A[0] * B[12]
mulx 96(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[13]
mulx 104(%rbp), %rax, %r12
movq %r10, 96(%rbx)
adcxq %rax, %r11
# A[0] * B[14]
mulx 112(%rbp), %rax, %r8
movq %r11, 104(%rbx)
adcxq %rax, %r12
# A[0] * B[15]
mulx 120(%rbp), %rax, %r9
movq %r12, 112(%rbx)
adcxq %rax, %r8
adcxq %r14, %r9
movq %r14, %r13
adcxq %r14, %r13
movq %r8, 120(%rbx)
movq %r9, (%rdi)
movq 8(%rsi), %rdx
movq 8(%rbx), %r9
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
# A[1] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 8(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[1] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 32(%rbx)
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
# A[1] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 64(%rbx)
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[1] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[1] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[1] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[1] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[1] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq 16(%rsi), %rdx
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
# A[2] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[2] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[2] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 40(%rbx)
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
# A[2] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[2] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 72(%rbx)
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[2] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[2] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[2] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, (%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq 24(%rsi), %rdx
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
# A[3] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[3] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 48(%rbx)
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
# A[3] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[3] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 80(%rbx)
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[3] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[3] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq 32(%rsi), %rdx
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
# A[4] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 56(%rbx)
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
# A[4] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 88(%rbx)
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[4] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[4] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 24(%rdi)
movq %r8, 32(%rdi)
movq 40(%rsi), %rdx
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
# A[5] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 64(%rbx)
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[5] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[5] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[5] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[5] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[5] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rsi), %rdx
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
# A[6] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[6] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 72(%rbx)
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[6] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[6] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[6] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[6] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[6] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[6] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, 40(%rdi)
movq %r10, 48(%rdi)
movq 56(%rsi), %rdx
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
# A[7] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[7] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[7] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 80(%rbx)
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[7] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[7] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[7] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[7] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[7] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsi), %rdx
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
# A[8] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[8] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 88(%rbx)
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[8] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[8] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[8] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 24(%rdi)
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[8] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 56(%rdi)
movq %r12, 64(%rdi)
movq 72(%rsi), %rdx
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[9] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[9] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[9] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[9] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
# A[9] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[9] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rsi), %rdx
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[10] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[10] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[10] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[10] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[10] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rdi)
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
# A[10] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[10] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[10] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq 88(%rsi), %rdx
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[11] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[11] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[11] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[11] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[11] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[11] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[11] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
# A[11] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[11] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[11] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rdx
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[12] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[12] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[12] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[12] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[12] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 24(%rdi)
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
# A[12] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 56(%rdi)
movq 72(%rdi), %r8
movq 80(%rdi), %r9
movq 88(%rdi), %r10
# A[12] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[12] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 88(%rdi)
movq %r11, 96(%rdi)
movq 104(%rsi), %rdx
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[13] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[13] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[13] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[13] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
# A[13] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 64(%rdi)
movq 80(%rdi), %r9
movq 88(%rdi), %r10
movq 96(%rdi), %r11
# A[13] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 96(%rdi)
movq %r12, 104(%rdi)
movq 112(%rsi), %rdx
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[14] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[14] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rdi)
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
# A[14] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[14] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rdi)
movq 88(%rdi), %r10
movq 96(%rdi), %r11
movq 104(%rdi), %r12
# A[14] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[14] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 96(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 104(%rdi)
movq %r8, 112(%rdi)
movq 120(%rsi), %rdx
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[15] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[15] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[15] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
movq 88(%rdi), %r10
# A[15] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[15] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rdi)
movq 96(%rdi), %r11
movq 104(%rdi), %r12
movq 112(%rdi), %r8
# A[15] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[15] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[15] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 96(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 104(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
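# The low 128 bytes of the product were written through %rbx, which the
# prologue (not shown here) points at stack scratch space when the
# output buffer overlaps an input. Step %rdi back to the low half and,
# if it aliases either source, copy the cached low half out with
# 16-byte vector moves.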
subq $0x80, %rdi
cmpq %rdi, %rsi
je L_start_2048_mul_avx2_16
cmpq %rdi, %rbp
jne L_end_2048_mul_avx2_16
L_start_2048_mul_avx2_16:
vmovdqu (%rbx), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbx), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbx), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbx), %xmm0
vmovups %xmm0, 48(%rdi)
vmovdqu 64(%rbx), %xmm0
vmovups %xmm0, 64(%rdi)
vmovdqu 80(%rbx), %xmm0
vmovups %xmm0, 80(%rdi)
vmovdqu 96(%rbx), %xmm0
vmovups %xmm0, 96(%rdi)
vmovdqu 112(%rbx), %xmm0
vmovups %xmm0, 112(%rdi)
L_end_2048_mul_avx2_16:
addq $0x80, %rsp
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_2048_mul_avx2_16,.-sp_2048_mul_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
 *
 * r A single precision integer.
 * a A single precision integer.
 * b A single precision integer.
 *
 * Returns the carry out of the most significant word (0 or 1) in %rax.
 */
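/* For reference, a minimal C sketch of the same ripple-carry loop,
 * assuming 64-bit words (<stdint.h>) and a compiler with
 * unsigned __int128; the helper name is illustrative only and is not
 * part of this file:
 *
 *     uint64_t add_16_sketch(uint64_t* r, const uint64_t* a,
 *                            const uint64_t* b)
 *     {
 *         unsigned __int128 t = 0;
 *         int i;
 *         for (i = 0; i < 16; i++) {
 *             t += (unsigned __int128)a[i] + b[i];
 *             r[i] = (uint64_t)t;
 *             t >>= 64;
 *         }
 *         return (uint64_t)t;   (the assembly returns this carry in %rax)
 *     }
 */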
#ifndef __APPLE__
.text
.globl sp_2048_add_16
.type sp_2048_add_16,@function
.align 16
sp_2048_add_16:
#else
.section __TEXT,__text
.globl _sp_2048_add_16
.p2align 4
_sp_2048_add_16:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq %r8, 120(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_add_16,.-sp_2048_add_16
#endif /* __APPLE__ */
/* Sub b from a into a. (a -= b)
 *
 * a A single precision integer and result.
 * b A single precision integer.
 *
 * Returns 0 in %rax when there is no borrow, otherwise an all-ones
 * mask (-1), produced by the final sbbq %rax, %rax.
 */
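/* Equivalent C sketch (illustrative only): subtract with a running
 * borrow and return the borrow as a 0 / -1 mask, matching the final
 * sbbq %rax, %rax below.
 *
 *     uint64_t sub_in_place_32_sketch(uint64_t* a, const uint64_t* b)
 *     {
 *         uint64_t borrow = 0;
 *         int i;
 *         for (i = 0; i < 32; i++) {
 *             unsigned __int128 t = (unsigned __int128)a[i] - b[i] - borrow;
 *             a[i] = (uint64_t)t;
 *             borrow = (uint64_t)(t >> 64) & 1;
 *         }
 *         return 0 - borrow;
 *     }
 */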
#ifndef __APPLE__
.text
.globl sp_2048_sub_in_place_32
.type sp_2048_sub_in_place_32,@function
.align 16
sp_2048_sub_in_place_32:
#else
.section __TEXT,__text
.globl _sp_2048_sub_in_place_32
.p2align 4
_sp_2048_sub_in_place_32:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq 128(%rdi), %rdx
movq %rcx, 120(%rdi)
sbbq 128(%rsi), %rdx
movq 136(%rdi), %rcx
movq %rdx, 128(%rdi)
sbbq 136(%rsi), %rcx
movq 144(%rdi), %rdx
movq %rcx, 136(%rdi)
sbbq 144(%rsi), %rdx
movq 152(%rdi), %rcx
movq %rdx, 144(%rdi)
sbbq 152(%rsi), %rcx
movq 160(%rdi), %rdx
movq %rcx, 152(%rdi)
sbbq 160(%rsi), %rdx
movq 168(%rdi), %rcx
movq %rdx, 160(%rdi)
sbbq 168(%rsi), %rcx
movq 176(%rdi), %rdx
movq %rcx, 168(%rdi)
sbbq 176(%rsi), %rdx
movq 184(%rdi), %rcx
movq %rdx, 176(%rdi)
sbbq 184(%rsi), %rcx
movq 192(%rdi), %rdx
movq %rcx, 184(%rdi)
sbbq 192(%rsi), %rdx
movq 200(%rdi), %rcx
movq %rdx, 192(%rdi)
sbbq 200(%rsi), %rcx
movq 208(%rdi), %rdx
movq %rcx, 200(%rdi)
sbbq 208(%rsi), %rdx
movq 216(%rdi), %rcx
movq %rdx, 208(%rdi)
sbbq 216(%rsi), %rcx
movq 224(%rdi), %rdx
movq %rcx, 216(%rdi)
sbbq 224(%rsi), %rdx
movq 232(%rdi), %rcx
movq %rdx, 224(%rdi)
sbbq 232(%rsi), %rcx
movq 240(%rdi), %rdx
movq %rcx, 232(%rdi)
sbbq 240(%rsi), %rdx
movq 248(%rdi), %rcx
movq %rdx, 240(%rdi)
sbbq 248(%rsi), %rcx
movq %rcx, 248(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_sub_in_place_32,.-sp_2048_sub_in_place_32
#endif /* __APPLE__ */
/* Add b to a into r. (r = a + b)
 *
 * r A single precision integer.
 * a A single precision integer.
 * b A single precision integer.
 *
 * Identical in structure to sp_2048_add_16 above, over 32 words;
 * returns the final carry in %rax.
 */
#ifndef __APPLE__
.text
.globl sp_2048_add_32
.type sp_2048_add_32,@function
.align 16
sp_2048_add_32:
#else
.section __TEXT,__text
.globl _sp_2048_add_32
.p2align 4
_sp_2048_add_32:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
adcq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
adcq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
adcq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
adcq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
adcq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
adcq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
adcq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
adcq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
adcq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
adcq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
adcq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
adcq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
adcq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
adcq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
adcq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
adcq 248(%rdx), %r8
movq %r8, 248(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_add_32,.-sp_2048_add_32
#endif /* __APPLE__ */
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
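/* This is one level of Karatsuba over the 16-word kernel. With
 * B = 2^1024 (16 words), split a = aH*B + aL and b = bH*B + bL; then
 *
 *     a*b = z2*B^2 + (z1 - z2 - z0)*B + z0,
 *     z0 = aL*bL, z2 = aH*bH, z1 = (aL + aH)*(bL + bH)
 *
 * so three 16-word multiplies replace four. Below, z1 is built at
 * (%rsp), z2 at 256(%rsp) and z0 directly in the output buffer; the
 * one-bit carries out of the half sums (saved at 792(%rsp) and
 * 800(%rsp)) are folded back in branch-free with 0/-1 masks instead of
 * conditional jumps.
 */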
#ifndef __APPLE__
.text
.globl sp_2048_mul_32
.type sp_2048_mul_32,@function
.align 16
sp_2048_mul_32:
#else
.section __TEXT,__text
.globl _sp_2048_mul_32
.p2align 4
_sp_2048_mul_32:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x328, %rsp
movq %rdi, 768(%rsp)
movq %rsi, 776(%rsp)
movq %rdx, 784(%rsp)
leaq 512(%rsp), %r10
leaq 128(%rsi), %r12
# Add: aL + aH -> 512(%rsp), carry -> %r13
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq %rax, 120(%r10)
adcq $0x00, %r13
movq %r13, 792(%rsp)
leaq 640(%rsp), %r11
leaq 128(%rdx), %r12
# Add: bL + bH -> 640(%rsp), carry -> %r14
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq %rax, 120(%r11)
adcq $0x00, %r14
movq %r14, 800(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_mul_16@plt
#else
callq _sp_2048_mul_16
#endif /* __APPLE__ */
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
leaq 256(%rsp), %rdi
addq $0x80, %rdx
addq $0x80, %rsi
#ifndef __APPLE__
callq sp_2048_mul_16@plt
#else
callq _sp_2048_mul_16
#endif /* __APPLE__ */
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
movq 768(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_mul_16@plt
#else
callq _sp_2048_mul_16
#endif /* __APPLE__ */
#ifdef _WIN64
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
movq 768(%rsp), %rdi
#endif /* _WIN64 */
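# Karatsuba carry fix-up: %r13/%r14 hold the one-bit carries ca, cb out
# of aL + aH and bL + bH. Negating them yields 0/-1 masks, so
# cb*(aL + aH) + ca*(bL + bH) can be selected without branches and
# added in at word 32 of the result, with ca*cb kept in %r9 and
# eventually landing at word 48.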
movq 792(%rsp), %r13
movq 800(%rsp), %r14
movq 768(%rsp), %r15
movq %r13, %r9
leaq 512(%rsp), %r10
leaq 640(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x100, %r15
movq (%r10), %rax
movq (%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, (%r10)
movq %rcx, (%r11)
movq 8(%r10), %rax
movq 8(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 8(%r10)
movq %rcx, 8(%r11)
movq 16(%r10), %rax
movq 16(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 16(%r10)
movq %rcx, 16(%r11)
movq 24(%r10), %rax
movq 24(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 24(%r10)
movq %rcx, 24(%r11)
movq 32(%r10), %rax
movq 32(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 32(%r10)
movq %rcx, 32(%r11)
movq 40(%r10), %rax
movq 40(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 40(%r10)
movq %rcx, 40(%r11)
movq 48(%r10), %rax
movq 48(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 48(%r10)
movq %rcx, 48(%r11)
movq 56(%r10), %rax
movq 56(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 56(%r10)
movq %rcx, 56(%r11)
movq 64(%r10), %rax
movq 64(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 64(%r10)
movq %rcx, 64(%r11)
movq 72(%r10), %rax
movq 72(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 72(%r10)
movq %rcx, 72(%r11)
movq 80(%r10), %rax
movq 80(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 80(%r10)
movq %rcx, 80(%r11)
movq 88(%r10), %rax
movq 88(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 88(%r10)
movq %rcx, 88(%r11)
movq 96(%r10), %rax
movq 96(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 96(%r10)
movq %rcx, 96(%r11)
movq 104(%r10), %rax
movq 104(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 104(%r10)
movq %rcx, 104(%r11)
movq 112(%r10), %rax
movq 112(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 112(%r10)
movq %rcx, 112(%r11)
movq 120(%r10), %rax
movq 120(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 120(%r10)
movq %rcx, 120(%r11)
movq (%r10), %rax
addq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq %rax, 120(%r15)
adcq $0x00, %r9
leaq 256(%rsp), %r11
movq %rsp, %r10
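# z1 -= z2 (z1 at (%rsp), z2 at 256(%rsp); the borrow is folded into %r9)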
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq %rcx, 248(%r10)
sbbq $0x00, %r9
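# z1 -= z0 (z0 was computed directly into the output buffer at (%rdi))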
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq %rcx, 248(%r10)
sbbq $0x00, %r9
subq $0x80, %r15
# Add the adjusted middle term z1 into r at word 16
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq %rcx, 248(%r15)
adcq $0x00, %r9
movq %r9, 384(%rdi)
addq $0x80, %r15
# Add the low words of z2 into r at word 32
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq %rcx, 128(%r15)
# Add to zero: carry the addition through the remaining words of z2
movq 136(%r11), %rax
adcq $0x00, %rax
movq 144(%r11), %rcx
movq %rax, 136(%r15)
adcq $0x00, %rcx
movq 152(%r11), %r8
movq %rcx, 144(%r15)
adcq $0x00, %r8
movq 160(%r11), %rax
movq %r8, 152(%r15)
adcq $0x00, %rax
movq 168(%r11), %rcx
movq %rax, 160(%r15)
adcq $0x00, %rcx
movq 176(%r11), %r8
movq %rcx, 168(%r15)
adcq $0x00, %r8
movq 184(%r11), %rax
movq %r8, 176(%r15)
adcq $0x00, %rax
movq 192(%r11), %rcx
movq %rax, 184(%r15)
adcq $0x00, %rcx
movq 200(%r11), %r8
movq %rcx, 192(%r15)
adcq $0x00, %r8
movq 208(%r11), %rax
movq %r8, 200(%r15)
adcq $0x00, %rax
movq 216(%r11), %rcx
movq %rax, 208(%r15)
adcq $0x00, %rcx
movq 224(%r11), %r8
movq %rcx, 216(%r15)
adcq $0x00, %r8
movq 232(%r11), %rax
movq %r8, 224(%r15)
adcq $0x00, %rax
movq 240(%r11), %rcx
movq %rax, 232(%r15)
adcq $0x00, %rcx
movq 248(%r11), %r8
movq %rcx, 240(%r15)
adcq $0x00, %r8
movq %r8, 248(%r15)
addq $0x328, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mul_32,.-sp_2048_mul_32
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
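/* Same one-level Karatsuba split as sp_2048_mul_32 above, but the three
 * 16-word products use the ADX/BMI2 kernel sp_2048_mul_avx2_16, and the
 * carry fix-up selects with BMI2 pextq instead of and-masking: with an
 * all-ones mask pextq returns its source unchanged, and with a zero
 * mask it returns zero, so the conditional select stays branch-free and
 * feeds straight into the addition chain.
 */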
#ifndef __APPLE__
.text
.globl sp_2048_mul_avx2_32
.type sp_2048_mul_avx2_32,@function
.align 16
sp_2048_mul_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_mul_avx2_32
.p2align 4
_sp_2048_mul_avx2_32:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x328, %rsp
movq %rdi, 768(%rsp)
movq %rsi, 776(%rsp)
movq %rdx, 784(%rsp)
leaq 512(%rsp), %r10
leaq 128(%rsi), %r12
# Add: aL + aH -> 512(%rsp), carry -> %r13
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq %rax, 120(%r10)
adcq $0x00, %r13
movq %r13, 792(%rsp)
leaq 640(%rsp), %r11
leaq 128(%rdx), %r12
# Add: bL + bH -> 640(%rsp), carry -> %r14
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq %rax, 120(%r11)
adcq $0x00, %r14
movq %r14, 800(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_mul_avx2_16@plt
#else
callq _sp_2048_mul_avx2_16
#endif /* __APPLE__ */
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
leaq 256(%rsp), %rdi
addq $0x80, %rdx
addq $0x80, %rsi
#ifndef __APPLE__
callq sp_2048_mul_avx2_16@plt
#else
callq _sp_2048_mul_avx2_16
#endif /* __APPLE__ */
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
movq 768(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_mul_avx2_16@plt
#else
callq _sp_2048_mul_avx2_16
#endif /* __APPLE__ */
#ifdef _WIN64
movq 784(%rsp), %rdx
movq 776(%rsp), %rsi
movq 768(%rsp), %rdi
#endif /* _WIN64 */
movq 792(%rsp), %r13
movq 800(%rsp), %r14
movq 768(%rsp), %r15
movq %r13, %r9
leaq 512(%rsp), %r10
leaq 640(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x100, %r15
movq (%r10), %rax
movq (%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
addq %rcx, %rax
movq 8(%r10), %rcx
movq 8(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, (%r15)
adcq %r8, %rcx
movq 16(%r10), %r8
movq 16(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 8(%r15)
adcq %rax, %r8
movq 24(%r10), %rax
movq 24(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 16(%r15)
adcq %rcx, %rax
movq 32(%r10), %rcx
movq 32(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 24(%r15)
adcq %r8, %rcx
movq 40(%r10), %r8
movq 40(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 32(%r15)
adcq %rax, %r8
movq 48(%r10), %rax
movq 48(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 40(%r15)
adcq %rcx, %rax
movq 56(%r10), %rcx
movq 56(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 48(%r15)
adcq %r8, %rcx
movq 64(%r10), %r8
movq 64(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 56(%r15)
adcq %rax, %r8
movq 72(%r10), %rax
movq 72(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 64(%r15)
adcq %rcx, %rax
movq 80(%r10), %rcx
movq 80(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 72(%r15)
adcq %r8, %rcx
movq 88(%r10), %r8
movq 88(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 80(%r15)
adcq %rax, %r8
movq 96(%r10), %rax
movq 96(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 88(%r15)
adcq %rcx, %rax
movq 104(%r10), %rcx
movq 104(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 96(%r15)
adcq %r8, %rcx
movq 112(%r10), %r8
movq 112(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 104(%r15)
adcq %rax, %r8
movq 120(%r10), %rax
movq 120(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 112(%r15)
adcq %rcx, %rax
movq %rax, 120(%r15)
adcq $0x00, %r9
leaq 256(%rsp), %r11
movq %rsp, %r10
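# z1 -= z2 (z1 at (%rsp), z2 at 256(%rsp); the borrow is folded into %r9)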
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq %rcx, 248(%r10)
sbbq $0x00, %r9
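# z1 -= z0 (z0 was computed directly into the output buffer at (%rdi))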
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq %rcx, 248(%r10)
sbbq $0x00, %r9
subq $0x80, %r15
# Add the adjusted middle term z1 into r at word 16
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq %rcx, 248(%r15)
adcq $0x00, %r9
movq %r9, 384(%rdi)
addq $0x80, %r15
# Add the low words of z2 into r at word 32
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq %rcx, 128(%r15)
# Add to zero: carry the addition through the remaining words of z2
movq 136(%r11), %rax
adcq $0x00, %rax
movq 144(%r11), %rcx
movq %rax, 136(%r15)
adcq $0x00, %rcx
movq 152(%r11), %r8
movq %rcx, 144(%r15)
adcq $0x00, %r8
movq 160(%r11), %rax
movq %r8, 152(%r15)
adcq $0x00, %rax
movq 168(%r11), %rcx
movq %rax, 160(%r15)
adcq $0x00, %rcx
movq 176(%r11), %r8
movq %rcx, 168(%r15)
adcq $0x00, %r8
movq 184(%r11), %rax
movq %r8, 176(%r15)
adcq $0x00, %rax
movq 192(%r11), %rcx
movq %rax, 184(%r15)
adcq $0x00, %rcx
movq 200(%r11), %r8
movq %rcx, 192(%r15)
adcq $0x00, %r8
movq 208(%r11), %rax
movq %r8, 200(%r15)
adcq $0x00, %rax
movq 216(%r11), %rcx
movq %rax, 208(%r15)
adcq $0x00, %rcx
movq 224(%r11), %r8
movq %rcx, 216(%r15)
adcq $0x00, %r8
movq 232(%r11), %rax
movq %r8, 224(%r15)
adcq $0x00, %rax
movq 240(%r11), %rcx
movq %rax, 232(%r15)
adcq $0x00, %rcx
movq 248(%r11), %r8
movq %rcx, 240(%r15)
adcq $0x00, %r8
movq %r8, 248(%r15)
addq $0x328, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mul_avx2_32,.-sp_2048_mul_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
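/* Schoolbook squaring that exploits symmetry: a[i]*a[j] occurs twice
 * for i != j, so
 *
 *     a^2 = sum_i a[i]^2 * 2^(128*i)
 *         + 2 * sum_{i<j} a[i]*a[j] * 2^(64*(i+j))
 *
 * Each output column therefore needs the distinct cross products only
 * once -- added twice for the first few columns, or summed in
 * %r10:%r11:%r12 and doubled for the later ones -- plus one diagonal
 * square term when the column index is even. That is roughly half the
 * multiplies of a general 16x16 product.
 */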
#ifndef __APPLE__
.text
.globl sp_2048_sqr_16
.type sp_2048_sqr_16,@function
.align 16
sp_2048_sqr_16:
#else
.section __TEXT,__text
.globl _sp_2048_sqr_16
.p2align 4
_sp_2048_sqr_16:
#endif /* __APPLE__ */
pushq %r12
subq $0x80, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rsp)
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 40(%rsp)
# A[0] * A[6]
movq 48(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 48(%rsp)
# A[0] * A[7]
movq 56(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[6]
movq 48(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 56(%rsp)
# A[0] * A[8]
movq 64(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[7]
movq 56(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[6]
movq 48(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 64(%rsp)
# A[0] * A[9]
movq 72(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[8]
movq 64(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[7]
movq 56(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[6]
movq 48(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 72(%rsp)
# A[0] * A[10]
movq 80(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[9]
movq 72(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[8]
movq 64(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[7]
movq 56(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[6]
movq 48(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 80(%rsp)
# A[0] * A[11]
movq 88(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[10]
movq 80(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[9]
movq 72(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[8]
movq 64(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[7]
movq 56(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[6]
movq 48(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 88(%rsp)
# A[0] * A[12]
movq 96(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[11]
movq 88(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[10]
movq 80(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[9]
movq 72(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[8]
movq 64(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[7]
movq 56(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[6]
movq 48(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 96(%rsp)
# A[0] * A[13]
movq 104(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[12]
movq 96(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[11]
movq 88(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[10]
movq 80(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[9]
movq 72(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[8]
movq 64(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[7]
movq 56(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 104(%rsp)
# A[0] * A[14]
movq 112(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[13]
movq 104(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[12]
movq 96(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[11]
movq 88(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[10]
movq 80(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[9]
movq 72(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[8]
movq 64(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[7]
movq 56(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 112(%rsp)
# A[0] * A[15]
movq 120(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[14]
movq 112(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[13]
movq 104(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[12]
movq 96(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[11]
movq 88(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[10]
movq 80(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[9]
movq 72(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[8]
movq 64(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 120(%rsp)
# A[1] * A[15]
movq 120(%rsi), %rax
mulq 8(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[2] * A[14]
movq 112(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[13]
movq 104(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[12]
movq 96(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[11]
movq 88(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[10]
movq 80(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[9]
movq 72(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[8]
movq 64(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 128(%rdi)
# A[2] * A[15]
movq 120(%rsi), %rax
mulq 16(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[3] * A[14]
movq 112(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[13]
movq 104(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[12]
movq 96(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[11]
movq 88(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[10]
movq 80(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[9]
movq 72(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 136(%rdi)
# A[3] * A[15]
movq 120(%rsi), %rax
mulq 24(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[4] * A[14]
movq 112(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[13]
movq 104(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[12]
movq 96(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[11]
movq 88(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[10]
movq 80(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[9]
movq 72(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 144(%rdi)
# A[4] * A[15]
movq 120(%rsi), %rax
mulq 32(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[5] * A[14]
movq 112(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[13]
movq 104(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[12]
movq 96(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[11]
movq 88(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[10]
movq 80(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 152(%rdi)
# A[5] * A[15]
movq 120(%rsi), %rax
mulq 40(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[6] * A[14]
movq 112(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[13]
movq 104(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[12]
movq 96(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[11]
movq 88(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[10]
movq 80(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 160(%rdi)
# A[6] * A[15]
movq 120(%rsi), %rax
mulq 48(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[7] * A[14]
movq 112(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[13]
movq 104(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[12]
movq 96(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[11]
movq 88(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 168(%rdi)
# A[7] * A[15]
movq 120(%rsi), %rax
mulq 56(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[8] * A[14]
movq 112(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[13]
movq 104(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[12]
movq 96(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[11]
movq 88(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 176(%rdi)
# A[8] * A[15]
movq 120(%rsi), %rax
mulq 64(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[9] * A[14]
movq 112(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[13]
movq 104(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[12]
movq 96(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 184(%rdi)
# A[9] * A[15]
movq 120(%rsi), %rax
mulq 72(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[10] * A[14]
movq 112(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[13]
movq 104(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[12] * A[12]
movq 96(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 192(%rdi)
# A[10] * A[15]
movq 120(%rsi), %rax
mulq 80(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[11] * A[14]
movq 112(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[12] * A[13]
movq 104(%rsi), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 200(%rdi)
# A[11] * A[15]
movq 120(%rsi), %rax
mulq 88(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[12] * A[14]
movq 112(%rsi), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[13] * A[13]
movq 104(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 208(%rdi)
# A[12] * A[15]
movq 120(%rsi), %rax
mulq 96(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * A[14]
movq 112(%rsi), %rax
mulq 104(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 216(%rdi)
# A[13] * A[15]
movq 120(%rsi), %rax
mulq 104(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[14] * A[14]
movq 112(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 224(%rdi)
# A[14] * A[15]
movq 120(%rsi), %rax
mulq 112(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 232(%rdi)
# A[15] * A[15]
movq 120(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
movq %rcx, 240(%rdi)
movq %r8, 248(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r10
movq 56(%rsp), %r11
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r10
movq 88(%rsp), %r11
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r10, 80(%rdi)
movq %r11, 88(%rdi)
movq 96(%rsp), %rax
movq 104(%rsp), %rdx
movq 112(%rsp), %r10
movq 120(%rsp), %r11
movq %rax, 96(%rdi)
movq %rdx, 104(%rdi)
movq %r10, 112(%rdi)
movq %r11, 120(%rdi)
addq $0x80, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_sqr_16,.-sp_2048_sqr_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
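/* The variant below relies on BMI2/ADX: MULX yields a full 128-bit
 * product without touching the flags, so two independent carry chains
 * stay live at once, CF (ADCX) for the low product halves and OF (ADOX)
 * for the high halves. A hedged C intrinsics sketch of one
 * multiply-accumulate row in this style (mac_row_adx is a hypothetical
 * name; build with -mbmi2 -madx):
 *
 *   #include <immintrin.h>
 *
 *   // r[0..n] += a[0..n-1] * b; returns the carry belonging at r[n + 1].
 *   static unsigned long long mac_row_adx(unsigned long long r[],
 *                                         const unsigned long long a[],
 *                                         unsigned long long b, int n)
 *   {
 *       unsigned long long lo, hi;
 *       unsigned char cf = 0, of = 0;
 *       for (int i = 0; i < n; i++) {
 *           lo = _mulx_u64(a[i], b, &hi);                      // flags untouched
 *           cf = _addcarryx_u64(cf, r[i], lo, &r[i]);          // CF chain (adcx)
 *           of = _addcarryx_u64(of, r[i + 1], hi, &r[i + 1]);  // OF chain (adox)
 *       }
 *       cf = _addcarry_u64(cf, r[n], 0, &r[n]);                // flush CF chain
 *       return (unsigned long long)cf + of;                    // for r[n + 1]
 *   }
 */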
#ifndef __APPLE__
.text
.globl sp_2048_sqr_avx2_16
.type sp_2048_sqr_avx2_16,@function
.align 16
sp_2048_sqr_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_sqr_avx2_16
.p2align 4
_sp_2048_sqr_avx2_16:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $0x80, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbp
cmovne %rdi, %rbp
addq $0x80, %rdi
xorq %r11, %r11
# Diagonal 1
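# (each "Diagonal" pass below accumulates a batch of distinct cross
#  products A[j] x A[i], i < j; the whole cross sum is doubled only once,
#  in the "Double and Add" section at the end. %r11 stays zero so that
#  adoxq %r11, <reg> can terminate an OF carry chain, and the
#  "No load"/"No store" comments mark result words kept live in
#  %r13/%r14/%r15/%rbx instead of being spilled to scratch.)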
# Zero into %r9
# Zero into %r10
# A[1] x A[0]
movq (%rsi), %rdx
mulxq 8(%rsi), %r8, %r9
# A[2] x A[0]
mulxq 16(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 8(%rbp)
movq %r9, 16(%rbp)
# Zero into %r8
# Zero into %r9
# A[3] x A[0]
mulxq 24(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
# A[4] x A[0]
mulxq 32(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
movq %r10, 24(%rbp)
movq %r8, 32(%rbp)
# Zero into %r10
# Zero into %r8
# A[5] x A[0]
mulxq 40(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
# A[6] x A[0]
mulxq 48(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r9, 40(%rbp)
movq %r10, 48(%rbp)
# Zero into %r9
# Zero into %r10
# A[7] x A[0]
mulxq 56(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
# A[8] x A[0]
mulxq 64(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 56(%rbp)
movq %r9, 64(%rbp)
# Zero into %r8
# Zero into %r9
# A[9] x A[0]
mulxq 72(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
# A[10] x A[0]
mulxq 80(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
movq %r10, 72(%rbp)
movq %r8, 80(%rbp)
# No load %r13 - %r10
# A[11] x A[0]
mulxq 88(%rsi), %rax, %r13
adcxq %rax, %r9
adoxq %r11, %r13
# A[12] x A[0]
mulxq 96(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %r11, %r14
movq %r9, 88(%rbp)
# No store %r13 - %r10
# No load %r15 - %r9
# A[13] x A[0]
mulxq 104(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %r11, %r15
# A[14] x A[0]
mulxq 112(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %r11, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
# Zero into %r8
# Zero into %r9
# A[15] x A[0]
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %rbx
adoxq %r11, %r8
# No store %rbx - %r10
# Carry
adcxq %r11, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, (%rdi)
# Diagonal 2
movq 24(%rbp), %r8
movq 32(%rbp), %r9
movq 40(%rbp), %r10
# A[2] x A[1]
movq 8(%rsi), %rdx
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] x A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 24(%rbp)
movq %r9, 32(%rbp)
movq 48(%rbp), %r8
movq 56(%rbp), %r9
# A[4] x A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[5] x A[1]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 40(%rbp)
movq %r8, 48(%rbp)
movq 64(%rbp), %r10
movq 72(%rbp), %r8
# A[6] x A[1]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] x A[1]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 56(%rbp)
movq %r10, 64(%rbp)
movq 80(%rbp), %r9
movq 88(%rbp), %r10
# A[8] x A[1]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] x A[1]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 72(%rbp)
movq %r9, 80(%rbp)
# No load %r13 - %r8
# A[10] x A[1]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r13
# A[11] x A[1]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r10, 88(%rbp)
# No store %r13 - %r8
# No load %r15 - %r10
# A[12] x A[1]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[13] x A[1]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r9
# No store %r15 - %r10
movq (%rdi), %r9
# Zero into %r10
# A[14] x A[1]
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[15] x A[1]
mulxq 120(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
# Zero into %r8
# Zero into %r9
# A[15] x A[2]
movq 16(%rsi), %rdx
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 8(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 16(%rdi)
# Diagonal 3
movq 40(%rbp), %r8
movq 48(%rbp), %r9
movq 56(%rbp), %r10
# A[3] x A[2]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] x A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 40(%rbp)
movq %r9, 48(%rbp)
movq 64(%rbp), %r8
movq 72(%rbp), %r9
# A[5] x A[2]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[6] x A[2]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 56(%rbp)
movq %r8, 64(%rbp)
movq 80(%rbp), %r10
movq 88(%rbp), %r8
# A[7] x A[2]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] x A[2]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 72(%rbp)
movq %r10, 80(%rbp)
# No load %r13 - %r9
# A[9] x A[2]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r13
# A[10] x A[2]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r8, 88(%rbp)
# No store %r13 - %r9
# No load %r15 - %r8
# A[11] x A[2]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[12] x A[2]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r10
# No store %r15 - %r8
movq (%rdi), %r10
movq 8(%rdi), %r8
# A[13] x A[2]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r10
# A[14] x A[2]
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# No store %rbx - %r9
movq %r10, (%rdi)
movq 16(%rdi), %r9
# Zero into %r10
# A[14] x A[3]
movq 24(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[4]
movq 32(%rsi), %rdx
mulxq 112(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
# Zero into %r8
# Zero into %r9
# A[14] x A[5]
movq 40(%rsi), %rdx
mulxq 112(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 24(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 32(%rdi)
# Diagonal 4
movq 56(%rbp), %r8
movq 64(%rbp), %r9
movq 72(%rbp), %r10
# A[4] x A[3]
movq 24(%rsi), %rdx
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] x A[3]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 56(%rbp)
movq %r9, 64(%rbp)
movq 80(%rbp), %r8
movq 88(%rbp), %r9
# A[6] x A[3]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[7] x A[3]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 72(%rbp)
movq %r8, 80(%rbp)
# No load %r13 - %r10
# A[8] x A[3]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r13
# A[9] x A[3]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r9, 88(%rbp)
# No store %r13 - %r10
# No load %r15 - %r9
# A[10] x A[3]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[11] x A[3]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[12] x A[3]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# A[13] x A[3]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# No store %rbx - %r10
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[13] x A[4]
movq 32(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] x A[5]
movq 40(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
# Zero into %r10
# A[13] x A[6]
movq 48(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] x A[7]
movq 56(%rsi), %rdx
mulxq 104(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
# Zero into %r8
# Zero into %r9
# A[13] x A[8]
movq 64(%rsi), %rdx
mulxq 104(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 40(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 48(%rdi)
# Diagonal 5
movq 72(%rbp), %r8
movq 80(%rbp), %r9
movq 88(%rbp), %r10
# A[5] x A[4]
movq 32(%rsi), %rdx
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] x A[4]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 72(%rbp)
movq %r9, 80(%rbp)
# No load %r13 - %r8
# A[7] x A[4]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r13
# A[8] x A[4]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r10, 88(%rbp)
# No store %r13 - %r8
# No load %r15 - %r10
# A[9] x A[4]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[10] x A[4]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r9
# No store %r15 - %r10
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[11] x A[4]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[12] x A[4]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[12] x A[5]
movq 40(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[12] x A[6]
movq 48(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 8(%rdi)
movq %r8, 16(%rdi)
movq 32(%rdi), %r10
movq 40(%rdi), %r8
# A[12] x A[7]
movq 56(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] x A[8]
movq 64(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq 48(%rdi), %r9
# Zero into %r10
# A[12] x A[9]
movq 72(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] x A[10]
movq 80(%rsi), %rdx
mulxq 96(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
# Zero into %r8
# Zero into %r9
# A[12] x A[11]
movq 88(%rsi), %rdx
mulxq 96(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 56(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 64(%rdi)
# Diagonal 6
movq 88(%rbp), %r8
# No load %r13 - %r9
# A[6] x A[5]
movq 40(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r13
# A[7] x A[5]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r8, 88(%rbp)
# No store %r13 - %r9
# No load %r15 - %r8
# A[8] x A[5]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[9] x A[5]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r10
# No store %r15 - %r8
movq (%rdi), %r10
movq 8(%rdi), %r8
# A[10] x A[5]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r10
# A[11] x A[5]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# No store %rbx - %r9
movq %r10, (%rdi)
movq 16(%rdi), %r9
movq 24(%rdi), %r10
# A[11] x A[6]
movq 48(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] x A[7]
movq 56(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[11] x A[8]
movq 64(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[11] x A[9]
movq 72(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 24(%rdi)
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r8
# A[11] x A[10]
movq 80(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] x A[9]
movq 72(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 40(%rdi)
movq %r10, 48(%rdi)
movq 64(%rdi), %r9
# Zero into %r10
# A[13] x A[10]
movq 80(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] x A[11]
movq 88(%rsi), %rdx
mulxq 104(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
# Zero into %r8
# Zero into %r9
# A[13] x A[12]
movq 96(%rsi), %rdx
mulxq 104(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 72(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 80(%rdi)
# Diagonal 7
# No load %r15 - %r9
# A[7] x A[6]
movq 48(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[8] x A[6]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[9] x A[6]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# A[10] x A[6]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# No store %rbx - %r10
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[10] x A[7]
movq 56(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] x A[8]
movq 64(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[10] x A[9]
movq 72(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[6]
movq 48(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[14] x A[7]
movq 56(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[14] x A[8]
movq 64(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 40(%rdi)
movq %r8, 48(%rdi)
movq 64(%rdi), %r10
movq 72(%rdi), %r8
# A[14] x A[9]
movq 72(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] x A[10]
movq 80(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 56(%rdi)
movq %r10, 64(%rdi)
movq 80(%rdi), %r9
# Zero into %r10
# A[14] x A[11]
movq 88(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[12]
movq 96(%rsi), %rdx
mulxq 112(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
# Zero into %r8
# Zero into %r9
# A[14] x A[13]
movq 104(%rsi), %rdx
mulxq 112(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 88(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 96(%rdi)
# Diagonal 8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[8] x A[7]
movq 56(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[9] x A[7]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[9] x A[8]
movq 64(%rsi), %rdx
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[15] x A[3]
movq 24(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 8(%rdi)
movq %r8, 16(%rdi)
movq 32(%rdi), %r10
movq 40(%rdi), %r8
# A[15] x A[4]
movq 32(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] x A[5]
movq 40(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq 48(%rdi), %r9
movq 56(%rdi), %r10
# A[15] x A[6]
movq 48(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] x A[7]
movq 56(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq 64(%rdi), %r8
movq 72(%rdi), %r9
# A[15] x A[8]
movq 64(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[15] x A[9]
movq 72(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 56(%rdi)
movq %r8, 64(%rdi)
movq 80(%rdi), %r10
movq 88(%rdi), %r8
# A[15] x A[10]
movq 80(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] x A[11]
movq 88(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 72(%rdi)
movq %r10, 80(%rdi)
movq 96(%rdi), %r9
# Zero into %r10
# A[15] x A[12]
movq 96(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] x A[13]
movq 104(%rsi), %rdx
mulxq 120(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 88(%rdi)
movq %r9, 96(%rdi)
# Zero into %r8
# Zero into %r9
# A[15] x A[14]
movq 112(%rsi), %rdx
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 104(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 112(%rdi)
movq %r12, 120(%rdi)
# Double and Add in A[i] x A[i]
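# (every word accumulated above holds each cross product once; here it is
#  doubled on the OF chain (adoxq reg, reg) while the diagonal square
#  A[i] x A[i] is added on the CF chain (adcxq), so the two carry chains
#  once more run independently)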
movq 8(%rbp), %r9
# A[0] x A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rbp)
adoxq %r9, %r9
adcxq %rcx, %r9
movq %r9, 8(%rbp)
movq 16(%rbp), %r8
movq 24(%rbp), %r9
# A[1] x A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rbp)
movq %r9, 24(%rbp)
movq 32(%rbp), %r8
movq 40(%rbp), %r9
# A[2] x A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rbp)
movq %r9, 40(%rbp)
movq 48(%rbp), %r8
movq 56(%rbp), %r9
# A[3] x A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 48(%rbp)
movq %r9, 56(%rbp)
movq 64(%rbp), %r8
movq 72(%rbp), %r9
# A[4] x A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 64(%rbp)
movq %r9, 72(%rbp)
movq 80(%rbp), %r8
movq 88(%rbp), %r9
# A[5] x A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 80(%rbp)
movq %r9, 88(%rbp)
# A[6] x A[6]
movq 48(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r13, %r13
adoxq %r14, %r14
adcxq %rax, %r13
adcxq %rcx, %r14
# A[7] x A[7]
movq 56(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r15, %r15
adoxq %rbx, %rbx
adcxq %rax, %r15
adcxq %rcx, %rbx
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[8] x A[8]
movq 64(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[9] x A[9]
movq 72(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[10] x A[10]
movq 80(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[11] x A[11]
movq 88(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdi), %r8
movq 72(%rdi), %r9
# A[12] x A[12]
movq 96(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq 80(%rdi), %r8
movq 88(%rdi), %r9
# A[13] x A[13]
movq 104(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rdi), %r8
movq 104(%rdi), %r9
# A[14] x A[14]
movq 112(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 96(%rdi)
movq %r9, 104(%rdi)
movq 112(%rdi), %r8
movq 120(%rdi), %r9
# A[15] x A[15]
movq 120(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
movq %r13, -32(%rdi)
movq %r14, -24(%rdi)
movq %r15, -16(%rdi)
movq %rbx, -8(%rdi)
subq $0x80, %rdi
cmpq %rdi, %rsi
jne L_end_2048_sqr_avx2_16
vmovdqu (%rbp), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbp), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbp), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbp), %xmm0
vmovups %xmm0, 48(%rdi)
vmovdqu 64(%rbp), %xmm0
vmovups %xmm0, 64(%rdi)
vmovdqu 80(%rbp), %xmm0
vmovups %xmm0, 80(%rdi)
L_end_2048_sqr_avx2_16:
addq $0x80, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_2048_sqr_avx2_16,.-sp_2048_sqr_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
 * Karatsuba squaring from three half-size squares: ah^2, al^2 and (al - ah)^2.
*
* r A single precision integer.
* a A single precision integer.
*/
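/* Recombination sketch (the identity, not the exact register schedule):
 * split a = ah * 2^1024 + al with 16 words in each half. Then
 *
 *   a^2     = ah^2 * 2^2048 + (2*ah*al) * 2^1024 + al^2
 *   2*ah*al = ah^2 + al^2 - (al - ah)^2
 *
 * so three half-size squares suffice: al^2 into the low half of r, ah^2
 * into the high half, and (al - ah)^2 on the stack. The "Cond Negate"
 * block first turns al - ah into its true magnitude |al - ah| (a 16-word
 * two's-complement negative would square to the wrong 32-word value),
 * and the long subtract/add runs after the three calls assemble the
 * middle term and fold the borrows into a single carry word.
 */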
#ifndef __APPLE__
.text
.globl sp_2048_sqr_32
.type sp_2048_sqr_32,@function
.align 16
sp_2048_sqr_32:
#else
.section __TEXT,__text
.globl _sp_2048_sqr_32
.p2align 4
_sp_2048_sqr_32:
#endif /* __APPLE__ */
subq $0x110, %rsp
movq %rdi, 256(%rsp)
movq %rsi, 264(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 128(%rsi), %r9
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq %rax, 120(%r8)
sbbq $0x00, %rcx
# Cond Negate
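# (%rcx is 0 or -1 from the sbbq above, i.e. a borrow mask; xorq with the
#  mask plus the propagated +1 is a branchless two's-complement negate,
#  x = (x ^ m) - m applied across the words with carry, giving |al - ah|)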
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 120(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_16@plt
#else
callq _sp_2048_sqr_16
#endif /* __APPLE__ */
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
addq $0x80, %rsi
addq $0x100, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_16@plt
#else
callq _sp_2048_sqr_16
#endif /* __APPLE__ */
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_sqr_16@plt
#else
callq _sp_2048_sqr_16
#endif /* __APPLE__ */
#ifdef _WIN64
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
#endif /* _WIN64 */
movq 256(%rsp), %rsi
leaq 128(%rsp), %r8
addq $0x180, %rsi
movq $0x00, %rcx
movq -128(%r8), %rax
subq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq %rdx, 120(%r8)
sbbq $0x00, %rcx
subq $0x100, %rsi
movq -128(%r8), %rax
subq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq %rdx, 120(%r8)
sbbq $0x00, %rcx
movq 256(%rsp), %rdi
negq %rcx
addq $0x100, %rdi
movq -128(%rdi), %rax
subq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq %rdx, 120(%rdi)
sbbq $0x00, %rcx
movq 256(%rsp), %rdi
addq $0x180, %rdi
# Add in word
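# (%rcx holds the net borrow of the preceding subtractions as a small
#  signed word; it is added into the lowest word of the top quarter of r
#  and the resulting carry rippled through to the top word)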
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq %rdx, 120(%rdi)
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
addq $0x110, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_sqr_32,.-sp_2048_sqr_32
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
 * Karatsuba squaring from three half-size squares: ah^2, al^2 and (al - ah)^2.
*
* r A single precision integer.
* a A single precision integer.
*/
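/* This is the same three-square recombination as sp_2048_sqr_32 above;
 * only the half-size squaring differs, calling the BMI2/ADX routine
 * sp_2048_sqr_avx2_16 instead of sp_2048_sqr_16. */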
#ifndef __APPLE__
.text
.globl sp_2048_sqr_avx2_32
.type sp_2048_sqr_avx2_32,@function
.align 16
sp_2048_sqr_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_sqr_avx2_32
.p2align 4
_sp_2048_sqr_avx2_32:
#endif /* __APPLE__ */
subq $0x110, %rsp
movq %rdi, 256(%rsp)
movq %rsi, 264(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 128(%rsi), %r9
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq %rax, 120(%r8)
sbbq $0x00, %rcx
# Cond Negate
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 120(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_16@plt
#else
callq _sp_2048_sqr_avx2_16
#endif /* __APPLE__ */
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
addq $0x80, %rsi
addq $0x100, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_16@plt
#else
callq _sp_2048_sqr_avx2_16
#endif /* __APPLE__ */
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_16@plt
#else
callq _sp_2048_sqr_avx2_16
#endif /* __APPLE__ */
#ifdef _WIN64
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
#endif /* _WIN64 */
movq 256(%rsp), %rsi
leaq 128(%rsp), %r8
addq $0x180, %rsi
movq $0x00, %rcx
movq -128(%r8), %rax
subq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq %rdx, 120(%r8)
sbbq $0x00, %rcx
subq $0x100, %rsi
movq -128(%r8), %rax
subq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq %rdx, 120(%r8)
sbbq $0x00, %rcx
movq 256(%rsp), %rdi
negq %rcx
addq $0x100, %rdi
movq -128(%rdi), %rax
subq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq %rdx, 120(%rdi)
sbbq $0x00, %rcx
movq 256(%rsp), %rdi
addq $0x180, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq %rdx, 120(%rdi)
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
addq $0x110, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_sqr_avx2_32,.-sp_2048_sqr_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Sub b from a into a. (a -= b)
*
* a A single precision integer and result.
 * b A single precision integer.
 *
 * Returns the borrow out of the subtraction (0, or -1 as all ones) in %rax.
 */
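/* Portable shape of the unrolled subtraction below (illustrative only;
 * sub_in_place_ref is a hypothetical name, not this file's API): ripple
 * SBB through all words, then materialise the final borrow as 0 or -1,
 * which is what the closing "sbbq %rax, %rax" does.
 *
 *   #include <immintrin.h>
 *
 *   static long long sub_in_place_ref(unsigned long long a[],
 *                                     const unsigned long long b[], int n)
 *   {
 *       unsigned char c = 0;
 *       for (int i = 0; i < n; i++)
 *           c = _subborrow_u64(c, a[i], b[i], &a[i]);  // a[i] -= b[i] + c
 *       return -(long long)c;         // 0 on no borrow, -1 on borrow
 *   }
 */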
#ifndef __APPLE__
.text
.globl sp_2048_sub_in_place_16
.type sp_2048_sub_in_place_16,@function
.align 16
sp_2048_sub_in_place_16:
#else
.section __TEXT,__text
.globl _sp_2048_sub_in_place_16
.p2align 4
_sp_2048_sub_in_place_16:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq %rcx, 120(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_sub_in_place_16,.-sp_2048_sub_in_place_16
#endif /* __APPLE__ */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
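/* A hedged reference sketch in C of the schoolbook digit multiply below
 * (not part of the generated code; same sp_digit and unsigned __int128
 * assumptions as the sketch above sp_2048_sub_in_place_16):
 *
 *   void sp_2048_mul_d_32_ref(sp_digit* r, const sp_digit* a, sp_digit b)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 32; i++) {
 *           t += (unsigned __int128)a[i] * b;
 *           r[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       r[32] = (sp_digit)t;
 *   }
 *
 * Instead of one 128-bit accumulator, the assembly rotates r8/r9/r10
 * through the roles of current limb, carry, and carry overflow.
 */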
#ifndef __APPLE__
.text
.globl sp_2048_mul_d_32
.type sp_2048_mul_d_32,@function
.align 16
sp_2048_mul_d_32:
#else
.section __TEXT,__text
.globl _sp_2048_mul_d_32
.p2align 4
_sp_2048_mul_d_32:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 120(%rsi)
addq %rax, %r8
movq %r8, 120(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[16] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 128(%rsi)
addq %rax, %r9
movq %r9, 128(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[17] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 136(%rsi)
addq %rax, %r10
movq %r10, 136(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[18] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 144(%rsi)
addq %rax, %r8
movq %r8, 144(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[19] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 152(%rsi)
addq %rax, %r9
movq %r9, 152(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[20] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 160(%rsi)
addq %rax, %r10
movq %r10, 160(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[21] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 168(%rsi)
addq %rax, %r8
movq %r8, 168(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[22] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 176(%rsi)
addq %rax, %r9
movq %r9, 176(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[23] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 184(%rsi)
addq %rax, %r10
movq %r10, 184(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[24] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 192(%rsi)
addq %rax, %r8
movq %r8, 192(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[25] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 200(%rsi)
addq %rax, %r9
movq %r9, 200(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[26] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 208(%rsi)
addq %rax, %r10
movq %r10, 208(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[27] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 216(%rsi)
addq %rax, %r8
movq %r8, 216(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[28] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 224(%rsi)
addq %rax, %r9
movq %r9, 224(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[29] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 232(%rsi)
addq %rax, %r10
movq %r10, 232(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[30] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 240(%rsi)
addq %rax, %r8
movq %r8, 240(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[31] * B
movq %rcx, %rax
mulq 248(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq %r9, 248(%rdi)
movq %r10, 256(%rdi)
repz retq
#ifndef __APPLE__
.size sp_2048_mul_d_32,.-sp_2048_mul_d_32
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
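/* A hedged reference sketch in C of the conditional subtract below (not
 * part of the generated code; masking b keeps the memory access pattern
 * identical whether or not the subtraction takes effect):
 *
 *   sp_digit sp_2048_cond_sub_16_ref(sp_digit* r, const sp_digit* a,
 *                                    const sp_digit* b, sp_digit m)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 16; i++) {
 *           t = (unsigned __int128)a[i] - (b[i] & m) - ((t >> 64) & 1);
 *           r[i] = (sp_digit)t;
 *       }
 *       return (sp_digit)0 - (sp_digit)((t >> 64) & 1);
 *   }
 *
 * The assembly stages the masked copy of b on the stack first, then runs a
 * plain subtract-with-borrow over it.
 */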
#ifndef __APPLE__
.text
.globl sp_2048_cond_sub_16
.type sp_2048_cond_sub_16,@function
.align 16
sp_2048_cond_sub_16:
#else
.section __TEXT,__text
.globl _sp_2048_cond_sub_16
.p2align 4
_sp_2048_cond_sub_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq %rax, %rax
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_cond_sub_16,.-sp_2048_cond_sub_16
#endif /* __APPLE__ */
/* Reduce the number back to 2048 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
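/* A hedged outline in C of the word-by-word Montgomery reduction below
 * (not part of the generated code; same sp_digit and unsigned __int128
 * assumptions as the earlier sketches):
 *
 *   sp_digit ca = 0;
 *   for (i = 0; i < 16; i++) {
 *       unsigned __int128 t = 0;
 *       sp_digit mu = a[i] * mp;       (truncating multiply mod 2^64)
 *       for (j = 0; j < 16; j++) {
 *           t += (unsigned __int128)mu * m[j] + a[i + j];
 *           a[i + j] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       t += (unsigned __int128)a[i + 16] + ca;
 *       a[i + 16] = (sp_digit)t;
 *       ca = (sp_digit)(t >> 64);
 *   }
 *   sp_2048_cond_sub_16(a, a + 16, m, (sp_digit)0 - ca);
 *
 * Each pass zeroes a[i]; the result is the top 16 limbs, and the final
 * carry selects the conditional subtract of m (the negq %r15 and the call
 * to sp_2048_cond_sub_16 at the end of the function).
 */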
#ifndef __APPLE__
.text
.globl sp_2048_mont_reduce_16
.type sp_2048_mont_reduce_16,@function
.align 16
sp_2048_mont_reduce_16:
#else
.section __TEXT,__text
.globl _sp_2048_mont_reduce_16
.p2align 4
_sp_2048_mont_reduce_16:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 16
movq $16, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_2048_mont_reduce_16_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 120(%rdi)
adcq %rdx, 128(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_2048_mont_reduce_16_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
subq $0x80, %rdi
#ifndef __APPLE__
callq sp_2048_cond_sub_16@plt
#else
callq _sp_2048_cond_sub_16
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mont_reduce_16,.-sp_2048_mont_reduce_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
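/* Implementation note: this variant masks b with pextq instead of staging
 * a masked copy on the stack. With the mask operand m set to all ones,
 * pextq passes b[i] through unchanged; with m zero it yields zero, so the
 * subtract-with-borrow chain sees either b or 0 without any branch.
 */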
#ifndef __APPLE__
.text
.globl sp_2048_cond_sub_avx2_16
.type sp_2048_cond_sub_avx2_16,@function
.align 16
sp_2048_cond_sub_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_cond_sub_avx2_16
.p2align 4
_sp_2048_cond_sub_avx2_16:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq %r8, 120(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_cond_sub_avx2_16,.-sp_2048_cond_sub_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
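/* Same schoolbook loop as sketched above sp_2048_mul_d_32, over 16 limbs
 * with the final carry written to r[16].
 */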
#ifndef __APPLE__
.text
.globl sp_2048_mul_d_16
.type sp_2048_mul_d_16,@function
.align 16
sp_2048_mul_d_16:
#else
.section __TEXT,__text
.globl _sp_2048_mul_d_16
.p2align 4
_sp_2048_mul_d_16:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 120(%rdi)
movq %r9, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_2048_mul_d_16,.-sp_2048_mul_d_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
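/* Implementation note: mulxq computes the full 128-bit product without
 * modifying flags, while adcxq and adoxq each maintain an independent
 * carry chain (CF and OF respectively), so the low and high halves of
 * successive products can be accumulated without serializing on a single
 * flags register.
 */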
#ifndef __APPLE__
.text
.globl sp_2048_mul_d_avx2_16
.type sp_2048_mul_d_avx2_16,@function
.align 16
sp_2048_mul_d_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_mul_d_avx2_16
.p2align 4
_sp_2048_mul_d_avx2_16:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 120(%rdi)
movq %r9, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_2048_mul_d_avx2_16,.-sp_2048_mul_d_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
 * returns the 64-bit quotient of the division.
*/
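/* A hedged reference sketch in C (not part of the generated code; assumes
 * unsigned __int128 and that d1 < div so the quotient fits in 64 bits,
 * which is also what the divq instruction below requires):
 *
 *   sp_digit div_2048_word_16_ref(sp_digit d1, sp_digit d0, sp_digit div)
 *   {
 *       unsigned __int128 d = ((unsigned __int128)d1 << 64) | d0;
 *       return (sp_digit)(d / div);
 *   }
 */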
#ifndef __APPLE__
.text
.globl div_2048_word_asm_16
.type div_2048_word_asm_16,@function
.align 16
div_2048_word_asm_16:
#else
.section __TEXT,__text
.globl _div_2048_word_asm_16
.p2align 4
_div_2048_word_asm_16:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_2048_word_asm_16,.-div_2048_word_asm_16
#endif /* __APPLE__ */
#endif /* _WIN64 */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
 * return -1, 0 or 1 if a is less than, equal to or greater than b
 * respectively.
*/
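/* Semantically equivalent (non-constant-time) C, for reference only; the
 * cmov-based sequence below is what provides the constant-time guarantee:
 *
 *   for (i = 15; i >= 0; i--) {
 *       if (a[i] > b[i]) return 1;
 *       if (a[i] < b[i]) return -1;
 *   }
 *   return 0;
 *
 * In the assembly, %rdx doubles as an all-ones mask that is cleared on the
 * first differing limb, so later limbs compare as 0 == 0 and cannot change
 * the result; the final xorq folds the all-equal case to 0.
 */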
#ifndef __APPLE__
.text
.globl sp_2048_cmp_16
.type sp_2048_cmp_16,@function
.align 16
sp_2048_cmp_16:
#else
.section __TEXT,__text
.globl _sp_2048_cmp_16
.p2align 4
_sp_2048_cmp_16:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_cmp_16,.-sp_2048_cmp_16
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
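/* Get the entry at index idx from a table of 32 precomputed values, in
 * constant time: every entry is loaded and masked so the memory access
 * pattern does not depend on the secret index. (Parameter names below are
 * inferred from the register usage.)
 *
 * r     A single precision number to hold the selected entry.
 * table Array of 32 pointers to 16-digit entries.
 * idx   Index of the entry to select.
 */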
#ifndef __APPLE__
.text
.globl sp_2048_get_from_table_16
.type sp_2048_get_from_table_16,@function
.align 16
sp_2048_get_from_table_16:
#else
.section __TEXT,__text
.globl _sp_2048_get_from_table_16
.p2align 4
_sp_2048_get_from_table_16:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
pxor %xmm13, %xmm13
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm10, %xmm10
# START: 0-7
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 0-7
# START: 8-15
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
# END: 8-15
repz retq
#ifndef __APPLE__
.size sp_2048_get_from_table_16,.-sp_2048_get_from_table_16
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 2048 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
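/* Implementation note: same word-by-word reduction as sketched above
 * sp_2048_mont_reduce_16, but using mulxq/adcxq/adoxq with two limbs of a
 * folded per loop iteration (i is stepped by 2), and the final conditional
 * subtract of m inlined with pextq masks instead of calling
 * sp_2048_cond_sub_16.
 */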
#ifndef __APPLE__
.text
.globl sp_2048_mont_reduce_avx2_16
.type sp_2048_mont_reduce_avx2_16,@function
.align 16
sp_2048_mont_reduce_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_mont_reduce_avx2_16
.p2align 4
_sp_2048_mont_reduce_avx2_16:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 16
movq $16, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0x40, %rdi
L_2048_mont_reduce_avx2_16_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -32(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 64(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -24(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -16(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -8(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -16(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq (%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -8(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq 8(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, (%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 16(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq 24(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 16(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq 32(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 24(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq 40(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 32(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq 48(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 40(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq 56(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 48(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq 64(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 56(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq 72(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 64(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 72(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 2
addq $16, %rdi
# i -= 2
subq $2, %r9
jnz L_2048_mont_reduce_avx2_16_loop
subq $0x40, %rdi
negq %rbp
movq %rdi, %r8
subq $0x80, %rdi
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq %rdx, 120(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mont_reduce_avx2_16,.-sp_2048_mont_reduce_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
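/* AVX2 variant of sp_2048_get_from_table_16: selects table[idx] from 32
 * precomputed 16-digit entries in constant time, using 256-bit masked
 * loads so each entry is covered in a single pass. (Parameter roles are
 * assumed to match the SSE2 version above.)
 */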
#ifndef __APPLE__
.text
.globl sp_2048_get_from_table_avx2_16
.type sp_2048_get_from_table_avx2_16,@function
.align 16
sp_2048_get_from_table_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_get_from_table_avx2_16
.p2align 4
_sp_2048_get_from_table_avx2_16:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
vpxor %ymm13, %ymm13, %ymm13
vpermd %ymm10, %ymm13, %ymm10
vpermd %ymm11, %ymm13, %ymm11
# START: 0-15
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 16
movq 128(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 17
movq 136(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 18
movq 144(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 19
movq 152(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 20
movq 160(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 21
movq 168(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 22
movq 176(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 23
movq 184(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 24
movq 192(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 25
movq 200(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 26
movq 208(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 27
movq 216(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 28
movq 224(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 29
movq 232(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 30
movq 240(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 31
movq 248(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
# END: 0-15
repz retq
#ifndef __APPLE__
.size sp_2048_get_from_table_avx2_16,.-sp_2048_get_from_table_avx2_16
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtraction result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
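/* A rough C equivalent of the routine below, for illustration only; it is
 * kept inside this comment so nothing extra is assembled, and the helper
 * name and <stdint.h> types are ad hoc:
 *
 *     static int64_t sp_2048_cond_sub_32_ref(uint64_t* r, const uint64_t* a,
 *                                            const uint64_t* b, uint64_t m)
 *     {
 *         uint64_t borrow = 0;
 *         for (int i = 0; i < 32; i++) {
 *             uint64_t bi = b[i] & m;        // m selects b[i] (-1) or 0
 *             uint64_t t  = a[i] - bi - borrow;
 *             borrow = (a[i] < bi) || (borrow && a[i] == bi);
 *             r[i] = t;
 *         }
 *         return -(int64_t)borrow;           // matches the final sbb %rax, %rax
 *     }
 *
 * The assembly below first masks all of b into a stack buffer and then does
 * one subtract-with-borrow pass, so the memory access pattern does not
 * depend on m.
 */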
#ifndef __APPLE__
.text
.globl sp_2048_cond_sub_32
.type sp_2048_cond_sub_32,@function
.align 16
sp_2048_cond_sub_32:
#else
.section __TEXT,__text
.globl _sp_2048_cond_sub_32
.p2align 4
_sp_2048_cond_sub_32:
#endif /* __APPLE__ */
subq $0x100, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq 192(%rdx), %r8
movq 200(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 192(%rsp)
movq %r9, 200(%rsp)
movq 208(%rdx), %r8
movq 216(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 208(%rsp)
movq %r9, 216(%rsp)
movq 224(%rdx), %r8
movq 232(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 224(%rsp)
movq %r9, 232(%rsp)
movq 240(%rdx), %r8
movq 248(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 240(%rsp)
movq %r9, 248(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 176(%rdi)
movq 192(%rsi), %r8
movq 192(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 184(%rdi)
movq 200(%rsi), %r9
movq 200(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 192(%rdi)
movq 208(%rsi), %r8
movq 208(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 200(%rdi)
movq 216(%rsi), %r9
movq 216(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 208(%rdi)
movq 224(%rsi), %r8
movq 224(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 216(%rdi)
movq 232(%rsi), %r9
movq 232(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 224(%rdi)
movq 240(%rsi), %r8
movq 240(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 232(%rdi)
movq 248(%rsi), %r9
movq 248(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
sbbq %rax, %rax
addq $0x100, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_cond_sub_32,.-sp_2048_cond_sub_32
#endif /* __APPLE__ */
/* Reduce the number back to 2048 bits using Montgomery reduction.
 *
 * a A single precision number to reduce in place.
 * m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64.
 */
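/* A rough C sketch of the word-serial Montgomery reduction performed below,
 * for illustration only (kept in this comment so it is not assembled; the
 * unsigned __int128 carries are an artifact of the sketch, not of the
 * assembly, which uses mulq/adcq):
 *
 *     // a[0..63] is the double-width input, m[0..31] the modulus and
 *     // mp == -(m^-1) mod 2^64.
 *     uint64_t over = 0;
 *     for (int i = 0; i < 32; i++) {
 *         uint64_t mu = a[i] * mp;               // low 64 bits only
 *         unsigned __int128 c = 0;
 *         for (int j = 0; j < 32; j++) {         // a += mu * m, shifted by i
 *             c += (unsigned __int128)mu * m[j] + a[i + j];
 *             a[i + j] = (uint64_t)c;
 *             c >>= 64;
 *         }
 *         c += (unsigned __int128)a[i + 32] + over;
 *         a[i + 32] = (uint64_t)c;
 *         over = (uint64_t)(c >> 64);
 *     }
 *     // the reduced value sits in a[32..63]; write it back to a[0..31],
 *     // subtracting m once more when the top carry is set
 *     sp_2048_cond_sub_32(a, a + 32, m, (uint64_t)0 - over);
 *
 * The tail call at the end of the function performs exactly this final
 * conditional subtraction.
 */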
#ifndef __APPLE__
.text
.globl sp_2048_mont_reduce_32
.type sp_2048_mont_reduce_32,@function
.align 16
sp_2048_mont_reduce_32:
#else
.section __TEXT,__text
.globl _sp_2048_mont_reduce_32
.p2align 4
_sp_2048_mont_reduce_32:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 32
movq $32, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_2048_mont_reduce_32_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 120(%rdi)
adcq $0x00, %r9
# a[i+16] += m[16] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 128(%rsi)
movq 128(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 128(%rdi)
adcq $0x00, %r10
# a[i+17] += m[17] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 136(%rsi)
movq 136(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 136(%rdi)
adcq $0x00, %r9
# a[i+18] += m[18] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 144(%rsi)
movq 144(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 144(%rdi)
adcq $0x00, %r10
# a[i+19] += m[19] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 152(%rsi)
movq 152(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 152(%rdi)
adcq $0x00, %r9
# a[i+20] += m[20] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 160(%rsi)
movq 160(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 160(%rdi)
adcq $0x00, %r10
# a[i+21] += m[21] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 168(%rsi)
movq 168(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 168(%rdi)
adcq $0x00, %r9
# a[i+22] += m[22] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 176(%rsi)
movq 176(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 176(%rdi)
adcq $0x00, %r10
# a[i+23] += m[23] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 184(%rsi)
movq 184(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 184(%rdi)
adcq $0x00, %r9
# a[i+24] += m[24] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 192(%rsi)
movq 192(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 192(%rdi)
adcq $0x00, %r10
# a[i+25] += m[25] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 200(%rsi)
movq 200(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 200(%rdi)
adcq $0x00, %r9
# a[i+26] += m[26] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 208(%rsi)
movq 208(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 208(%rdi)
adcq $0x00, %r10
# a[i+27] += m[27] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 216(%rsi)
movq 216(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 216(%rdi)
adcq $0x00, %r9
# a[i+28] += m[28] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 224(%rsi)
movq 224(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 224(%rdi)
adcq $0x00, %r10
# a[i+29] += m[29] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 232(%rsi)
movq 232(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 232(%rdi)
adcq $0x00, %r9
# a[i+30] += m[30] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 240(%rsi)
movq 240(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 240(%rdi)
adcq $0x00, %r10
# a[i+31] += m[31] * mu
movq %r11, %rax
mulq 248(%rsi)
movq 248(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 248(%rdi)
adcq %rdx, 256(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_2048_mont_reduce_32_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
        # mask = 0 - carry from the top word
        negq	%r15
#ifdef _WIN64
        movq	%rsi, %rdx
        movq	%r15, %rcx
#else
        movq	%r15, %rcx
        movq	%rsi, %rdx
#endif /* _WIN64 */
        # r = a - 32 words; a stays at the high half for the conditional subtract
        movq	%rdi, %rsi
        subq	$0x100, %rdi
#ifndef __APPLE__
callq sp_2048_cond_sub_32@plt
#else
callq _sp_2048_cond_sub_32
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mont_reduce_32,.-sp_2048_mont_reduce_32
#endif /* __APPLE__ */
/* Sub b from a into r. (r = a - b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
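/* Rough C equivalent, for illustration only (kept in this comment; the
 * helper name and <stdint.h> types are ad hoc):
 *
 *     static int64_t sp_2048_sub_32_ref(uint64_t* r, const uint64_t* a,
 *                                       const uint64_t* b)
 *     {
 *         uint64_t borrow = 0;
 *         for (int i = 0; i < 32; i++) {
 *             uint64_t t = a[i] - b[i] - borrow;
 *             borrow = (a[i] < b[i]) || (borrow && a[i] == b[i]);
 *             r[i] = t;
 *         }
 *         return -(int64_t)borrow;   // mirrors the trailing sbb %rax, %rax
 *     }
 */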
#ifndef __APPLE__
.text
.globl sp_2048_sub_32
.type sp_2048_sub_32,@function
.align 16
sp_2048_sub_32:
#else
.section __TEXT,__text
.globl _sp_2048_sub_32
.p2align 4
_sp_2048_sub_32:
#endif /* __APPLE__ */
movq (%rsi), %rcx
subq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
sbbq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
sbbq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
sbbq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
sbbq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
sbbq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
sbbq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
sbbq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
sbbq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
sbbq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
sbbq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
sbbq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
sbbq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
sbbq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
sbbq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
sbbq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
sbbq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
sbbq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
sbbq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
sbbq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
sbbq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
sbbq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
sbbq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
sbbq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
sbbq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
sbbq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
sbbq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
sbbq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
sbbq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
sbbq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
sbbq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
sbbq 248(%rdx), %r8
movq %r8, 248(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_sub_32,.-sp_2048_sub_32
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
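/* Rough C equivalent, for illustration only (kept in this comment; the
 * unsigned __int128 accumulator stands in for the mulx/adcx/adox carry
 * chains used below):
 *
 *     static void sp_2048_mul_d_32_ref(uint64_t* r, const uint64_t* a,
 *                                      uint64_t b)
 *     {
 *         unsigned __int128 t = 0;
 *         for (int i = 0; i < 32; i++) {
 *             t += (unsigned __int128)a[i] * b;
 *             r[i] = (uint64_t)t;
 *             t >>= 64;
 *         }
 *         r[32] = (uint64_t)t;   // 33rd word receives the final carry
 *     }
 *
 * Using mulx with two independent carry flags (CF via adcx, OF via adox)
 * lets the assembly run two carry chains in parallel.
 */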
#ifndef __APPLE__
.text
.globl sp_2048_mul_d_avx2_32
.type sp_2048_mul_d_avx2_32,@function
.align 16
sp_2048_mul_d_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_mul_d_avx2_32
.p2align 4
_sp_2048_mul_d_avx2_32:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 120(%rdi)
# A[16] * B
mulxq 128(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 128(%rdi)
# A[17] * B
mulxq 136(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 136(%rdi)
# A[18] * B
mulxq 144(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 144(%rdi)
# A[19] * B
mulxq 152(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 152(%rdi)
# A[20] * B
mulxq 160(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 160(%rdi)
# A[21] * B
mulxq 168(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 168(%rdi)
# A[22] * B
mulxq 176(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 176(%rdi)
# A[23] * B
mulxq 184(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 184(%rdi)
# A[24] * B
mulxq 192(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 192(%rdi)
# A[25] * B
mulxq 200(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 200(%rdi)
# A[26] * B
mulxq 208(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 208(%rdi)
# A[27] * B
mulxq 216(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 216(%rdi)
# A[28] * B
mulxq 224(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 224(%rdi)
# A[29] * B
mulxq 232(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 232(%rdi)
# A[30] * B
mulxq 240(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 240(%rdi)
# A[31] * B
mulxq 248(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 248(%rdi)
movq %r9, 256(%rdi)
repz retq
#ifndef __APPLE__
.size sp_2048_mul_d_avx2_32,.-sp_2048_mul_d_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
 * returns the result of the division.
 */
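/* Rough C equivalent, for illustration only (kept in this comment; relies
 * on unsigned __int128):
 *
 *     static uint64_t div_2048_word_32_ref(uint64_t d1, uint64_t d0,
 *                                          uint64_t div)
 *     {
 *         unsigned __int128 d = ((unsigned __int128)d1 << 64) | d0;
 *         return (uint64_t)(d / div);
 *     }
 *
 * As with the divq instruction below, d1 must be less than div; otherwise
 * the quotient does not fit in 64 bits (divq raises #DE in that case).
 */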
#ifndef __APPLE__
.text
.globl div_2048_word_asm_32
.type div_2048_word_asm_32,@function
.align 16
div_2048_word_asm_32:
#else
.section __TEXT,__text
.globl _div_2048_word_asm_32
.p2align 4
_div_2048_word_asm_32:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_2048_word_asm_32,.-div_2048_word_asm_32
#endif /* __APPLE__ */
#endif /* _WIN64 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtraction result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
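/* This variant computes b[i] & m with the BMI2 pextq instruction: with a
 * mask of all ones pext returns its source unchanged, and with a mask of
 * zero it returns zero, so no stack scratch buffer is needed. The per-word
 * operation, as a rough C sketch (illustrative only):
 *
 *     // m is 0 or all ones; the borrow propagates through sbbq below
 *     r[i] = a[i] - (b[i] & m) - borrow;
 */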
#ifndef __APPLE__
.text
.globl sp_2048_cond_sub_avx2_32
.type sp_2048_cond_sub_avx2_32,@function
.align 16
sp_2048_cond_sub_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_cond_sub_avx2_32
.p2align 4
_sp_2048_cond_sub_avx2_32:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
sbbq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
sbbq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
sbbq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
sbbq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
sbbq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
sbbq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
sbbq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
sbbq %r8, %r10
movq 192(%rdx), %r9
movq 192(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 184(%rdi)
sbbq %r9, %r8
movq 200(%rdx), %r10
movq 200(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 192(%rdi)
sbbq %r10, %r9
movq 208(%rdx), %r8
movq 208(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 200(%rdi)
sbbq %r8, %r10
movq 216(%rdx), %r9
movq 216(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 208(%rdi)
sbbq %r9, %r8
movq 224(%rdx), %r10
movq 224(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 216(%rdi)
sbbq %r10, %r9
movq 232(%rdx), %r8
movq 232(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 224(%rdi)
sbbq %r8, %r10
movq 240(%rdx), %r9
movq 240(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 232(%rdi)
sbbq %r9, %r8
movq 248(%rdx), %r10
movq 248(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 240(%rdi)
sbbq %r10, %r9
movq %r9, 248(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_cond_sub_avx2_32,.-sp_2048_cond_sub_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
* return -ve, 0 or +ve if a is less than, equal to or greater than b
* respectively.
*/
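/* The loop below scans from the most significant word down, keeping the
 * result in %rax and an all-ones "still equal" flag in %rdx. Each word is
 * ANDed with the flag, so once a difference has been seen later words
 * compare as 0 == 0 and cannot change the result. Rough C sketch
 * (illustrative only; branches are shown for clarity, while the assembly
 * uses cmov to stay constant time):
 *
 *     static int64_t sp_2048_cmp_32_ref(const uint64_t* a, const uint64_t* b)
 *     {
 *         int64_t  res  = -1;
 *         uint64_t mask = (uint64_t)-1;      // all ones while equal so far
 *         for (int i = 31; i >= 0; i--) {
 *             uint64_t x = a[i] & mask;
 *             uint64_t y = b[i] & mask;
 *             if (x > y) res = 1;            // cmova
 *             if (x < y) res = -1;           // cmovc
 *             if (x != y) mask = 0;          // cmovnz
 *         }
 *         return res ^ (int64_t)mask;        // equal case: -1 ^ -1 == 0
 *     }
 */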
#ifndef __APPLE__
.text
.globl sp_2048_cmp_32
.type sp_2048_cmp_32,@function
.align 16
sp_2048_cmp_32:
#else
.section __TEXT,__text
.globl _sp_2048_cmp_32
.p2align 4
_sp_2048_cmp_32:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 248(%rdi), %r9
movq 248(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 240(%rdi), %r9
movq 240(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 232(%rdi), %r9
movq 232(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 224(%rdi), %r9
movq 224(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 216(%rdi), %r9
movq 216(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 208(%rdi), %r9
movq 208(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 200(%rdi), %r9
movq 200(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 192(%rdi), %r9
movq 192(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 184(%rdi), %r9
movq 184(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 176(%rdi), %r9
movq 176(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 168(%rdi), %r9
movq 168(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 160(%rdi), %r9
movq 160(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 152(%rdi), %r9
movq 152(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 144(%rdi), %r9
movq 144(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 136(%rdi), %r9
movq 136(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 128(%rdi), %r9
movq 128(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_cmp_32,.-sp_2048_cmp_32
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
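/* Constant-time read of one 2048-bit entry from a table of pointers
 * (parameter names here are descriptive, not from the original source).
 *
 * r     A single precision number to copy the selected entry into.
 * table An array of 64 pointers to 2048-bit table entries.
 * idx   Index of the entry to fetch.
 *
 * Every entry is loaded and masked with the result of pcmpeqd against a
 * running counter, so the sequence of memory accesses is independent of
 * idx. Rough C sketch (illustrative only):
 *
 *     for (int j = 0; j < 32; j++)
 *         r[j] = 0;
 *     for (int i = 0; i < 64; i++) {
 *         uint64_t mask = (uint64_t)0 - (uint64_t)(i == idx);
 *         for (int j = 0; j < 32; j++)
 *             r[j] |= table[i][j] & mask;
 *     }
 *
 * The assembly processes eight words of every entry per pass (the "START"
 * blocks) to stay within the SSE registers.
 */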
#ifndef __APPLE__
.text
.globl sp_2048_get_from_table_32
.type sp_2048_get_from_table_32,@function
.align 16
sp_2048_get_from_table_32:
#else
.section __TEXT,__text
.globl _sp_2048_get_from_table_32
.p2align 4
_sp_2048_get_from_table_32:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
pxor %xmm13, %xmm13
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm10, %xmm10
# START: 0-7
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 32
movq 256(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 33
movq 264(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 34
movq 272(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 35
movq 280(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 36
movq 288(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 37
movq 296(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 38
movq 304(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 39
movq 312(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 40
movq 320(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 41
movq 328(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 42
movq 336(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 43
movq 344(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 44
movq 352(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 45
movq 360(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 46
movq 368(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 47
movq 376(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 48
movq 384(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 49
movq 392(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 50
movq 400(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 51
movq 408(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 52
movq 416(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 53
movq 424(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 54
movq 432(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 55
movq 440(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 56
movq 448(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 57
movq 456(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 58
movq 464(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 59
movq 472(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 60
movq 480(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 61
movq 488(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 62
movq 496(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 63
movq 504(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
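# All 64 entries have been scanned; at most one mask matched, so %xmm4-%xmm7
# now hold the selected entry's 64-byte chunk. Flush it to the output buffer
# and advance the output pointer.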
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 0-7
# START: 8-15
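# The START/END blocks in this file all follow the same (apparently generated)
# pattern: scan all 64 entry pointers in the table at (%rsi) and accumulate
# exactly one entry's 64-byte chunk into %xmm4-%xmm7, selected by comparing a
# running counter against the requested index held in %xmm10 (with %xmm11
# presumably holding the per-lane increment). Because every entry is read on
# every pass, the memory access pattern is independent of the index --
# presumably a constant-time (oblivious) table lookup. A rough C sketch of one
# block, using hypothetical names (table, index, out, chunk):
#
#   for (int i = 0; i < 64; i++) {
#       uint32_t mask = (i == index) ? 0xffffffffu : 0;  /* pcmpeqd */
#       for (int j = 0; j < 16; j++)                     /* 64 bytes */
#           out[chunk][j] |= table[i][chunk * 16 + j] & mask;
#   }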
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
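# %xmm13 is the running entry counter (cleared to 0, stepped by %xmm11 after
# each entry) and %xmm4-%xmm7 form the cleared 64-byte accumulator.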
# ENTRY: 0
movq (%rsi), %rcx
addq $0x40, %rcx
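# Unlike the 0-7 block above, each entry pointer is advanced by 0x40 here, so
# this block selects the second 64-byte chunk of every entry.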
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 32
movq 256(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 33
movq 264(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 34
movq 272(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 35
movq 280(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 36
movq 288(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 37
movq 296(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 38
movq 304(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 39
movq 312(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 40
movq 320(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 41
movq 328(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 42
movq 336(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 43
movq 344(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 44
movq 352(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 45
movq 360(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 46
movq 368(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 47
movq 376(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 48
movq 384(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 49
movq 392(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 50
movq 400(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 51
movq 408(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 52
movq 416(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 53
movq 424(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 54
movq 432(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 55
movq 440(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 56
movq 448(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 57
movq 456(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 58
movq 464(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 59
movq 472(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 60
movq 480(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 61
movq 488(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 62
movq 496(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 63
movq 504(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 8-15
# START: 16-23
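# Same select pattern as above; the 0x80 pointer offset picks the third
# 64-byte chunk of each entry.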
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 32
movq 256(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 33
movq 264(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 34
movq 272(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 35
movq 280(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 36
movq 288(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 37
movq 296(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 38
movq 304(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 39
movq 312(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 40
movq 320(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 41
movq 328(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 42
movq 336(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 43
movq 344(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 44
movq 352(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 45
movq 360(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 46
movq 368(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 47
movq 376(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 48
movq 384(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 49
movq 392(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 50
movq 400(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 51
movq 408(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 52
movq 416(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 53
movq 424(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 54
movq 432(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 55
movq 440(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 56
movq 448(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 57
movq 456(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 58
movq 464(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 59
movq 472(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 60
movq 480(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 61
movq 488(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 62
movq 496(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 63
movq 504(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 16-23
# START: 24-31
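# Same select pattern as above; the 0xc0 pointer offset picks the fourth
# 64-byte chunk of each entry.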
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 32
movq 256(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 33
movq 264(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 34
movq 272(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 35
movq 280(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 36
movq 288(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 37
movq 296(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 38
movq 304(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 39
movq 312(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 40
movq 320(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 41
movq 328(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 42
movq 336(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 43
movq 344(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 44
movq 352(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 45
movq 360(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 46
movq 368(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 47
movq 376(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 48
movq 384(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 49
movq 392(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 50
movq 400(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 51
movq 408(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 52
movq 416(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 53
movq 424(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 54
movq 432(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 55
movq 440(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 56
movq 448(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 57
movq 456(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 58
movq 464(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 59
movq 472(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 60
movq 480(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 61
movq 488(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 62
movq 496(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 63
movq 504(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
# END: 24-31
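        # rep ret: the redundant prefix sidesteps the single-byte-ret
        # branch misprediction penalty on older AMD cores.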
repz retq
#ifndef __APPLE__
.size sp_2048_get_from_table_32,.-sp_2048_get_from_table_32
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 2048 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64 (the word size).
*/
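/* For reference, a hedged C sketch of the word-serial Montgomery reduction
 * the unrolled code below implements. It is illustrative only and not part
 * of this file; u128 stands in for a double-width accumulator (a GCC/Clang
 * extension), and the function name is an assumption.
 *
 *   typedef unsigned long long sp_digit;
 *   typedef unsigned __int128 u128;
 *
 *   static void mont_reduce_32(sp_digit *a, const sp_digit *m, sp_digit mp)
 *   {
 *       sp_digit carry = 0;
 *       for (int i = 0; i < 32; i++) {
 *           sp_digit mu = a[i] * mp;            // mod 2^64
 *           sp_digit c = 0;
 *           for (int j = 0; j < 32; j++) {      // a[i+j] += m[j] * mu
 *               u128 t = (u128)m[j] * mu + a[i + j] + c;
 *               a[i + j] = (sp_digit)t;
 *               c = (sp_digit)(t >> 64);
 *           }
 *           u128 t = (u128)a[i + 32] + c + carry;
 *           a[i + 32] = (sp_digit)t;            // fold row carry upward
 *           carry = (sp_digit)(t >> 64);
 *       }
 *       // Branch-free final subtraction: mask is all-ones when the result
 *       // overflowed 2^2048 (carry set), else zero.
 *       sp_digit mask = (sp_digit)0 - carry;
 *       sp_digit b = 0;
 *       for (int j = 0; j < 32; j++) {
 *           u128 t = (u128)a[32 + j] - (m[j] & mask) - b;
 *           a[j] = (sp_digit)t;
 *           b = (sp_digit)(t >> 64) & 1;
 *       }
 *   }
 */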
#ifndef __APPLE__
.text
.globl sp_2048_mont_reduce_avx2_32
.type sp_2048_mont_reduce_avx2_32,@function
.align 16
sp_2048_mont_reduce_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_mont_reduce_avx2_32
.p2align 4
_sp_2048_mont_reduce_avx2_32:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
# i = 32
movq $32, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0x80, %rdi
xorq %rbp, %rbp
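        # The loop keeps the next four limbs of a in r12-r15 so each
        # iteration reloads only the limbs it is about to overwrite.
        # adcxq/adoxq maintain two independent carry chains (CF for the
        # low product halves, OF for the high halves); rbp carries the
        # top-word overflow from one row into the next.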
L_2048_mont_reduce_avx2_32_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -96(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -88(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -88(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -80(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq -64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -72(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq -56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -64(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq -48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -56(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq -40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -48(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq -32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -40(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -32(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+16] += m[16] * mu
mulxq 128(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+17] += m[17] * mu
mulxq 136(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+18] += m[18] * mu
mulxq 144(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+19] += m[19] * mu
mulxq 152(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+20] += m[20] * mu
mulxq 160(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+21] += m[21] * mu
mulxq 168(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+22] += m[22] * mu
mulxq 176(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+23] += m[23] * mu
mulxq 184(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
# a[i+24] += m[24] * mu
mulxq 192(%rsi), %rax, %rcx
movq 72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 64(%rdi)
# a[i+25] += m[25] * mu
mulxq 200(%rsi), %rax, %rcx
movq 80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 72(%rdi)
# a[i+26] += m[26] * mu
mulxq 208(%rsi), %rax, %rcx
movq 88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 80(%rdi)
# a[i+27] += m[27] * mu
mulxq 216(%rsi), %rax, %rcx
movq 96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 88(%rdi)
# a[i+28] += m[28] * mu
mulxq 224(%rsi), %rax, %rcx
movq 104(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rdi)
# a[i+29] += m[29] * mu
mulxq 232(%rsi), %rax, %rcx
movq 112(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 104(%rdi)
# a[i+30] += m[30] * mu
mulxq 240(%rsi), %rax, %rcx
movq 120(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 112(%rdi)
# a[i+31] += m[31] * mu
mulxq 248(%rsi), %rax, %rcx
movq 128(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 120(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 128(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 1
addq $8, %rdi
# i -= 1
subq $0x01, %r9
jnz L_2048_mont_reduce_avx2_32_loop
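        # Branch-free conditional subtraction of the modulus: negq turns
        # the final carry in rbp into an all-ones or all-zero mask, and
        # pextq with that mask yields m[j] unchanged or zero. The reduced
        # value in a[32..63] (its first four words still in r12-r15) is
        # written back to a[0..31].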
subq $0x80, %rdi
negq %rbp
movq %rdi, %r8
subq $0x100, %rdi
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq 128(%rsi), %rcx
movq 128(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 120(%rdi)
sbbq %rcx, %rax
movq 136(%rsi), %rdx
movq 136(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 128(%rdi)
sbbq %rdx, %rcx
movq 144(%rsi), %rax
movq 144(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 136(%rdi)
sbbq %rax, %rdx
movq 152(%rsi), %rcx
movq 152(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 144(%rdi)
sbbq %rcx, %rax
movq 160(%rsi), %rdx
movq 160(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 152(%rdi)
sbbq %rdx, %rcx
movq 168(%rsi), %rax
movq 168(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 160(%rdi)
sbbq %rax, %rdx
movq 176(%rsi), %rcx
movq 176(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 168(%rdi)
sbbq %rcx, %rax
movq 184(%rsi), %rdx
movq 184(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 176(%rdi)
sbbq %rdx, %rcx
movq 192(%rsi), %rax
movq 192(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 184(%rdi)
sbbq %rax, %rdx
movq 200(%rsi), %rcx
movq 200(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 192(%rdi)
sbbq %rcx, %rax
movq 208(%rsi), %rdx
movq 208(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 200(%rdi)
sbbq %rdx, %rcx
movq 216(%rsi), %rax
movq 216(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 208(%rdi)
sbbq %rax, %rdx
movq 224(%rsi), %rcx
movq 224(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 216(%rdi)
sbbq %rcx, %rax
movq 232(%rsi), %rdx
movq 232(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 224(%rdi)
sbbq %rdx, %rcx
movq 240(%rsi), %rax
movq 240(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 232(%rdi)
sbbq %rax, %rdx
movq 248(%rsi), %rcx
movq 248(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 240(%rdi)
sbbq %rcx, %rax
movq %rax, 248(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_2048_mont_reduce_avx2_32,.-sp_2048_mont_reduce_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
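/* Get an entry from the table with a cache-attack resistant access pattern:
 * every entry is read and the wanted one is selected with a lane mask, so
 * the memory trace is independent of the index.
 *
 * r      Result, 32 digits (2048 bits).
 * table  Array of 64 pointers to precomputed table entries.
 * idx    Index of the entry to return.
 *
 * A hedged C sketch of the select (illustrative only; the function and
 * parameter names are assumptions, not part of this file):
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void get_from_table_32(sp_digit *r, const sp_digit **table,
 *                                 int idx)
 *   {
 *       for (int j = 0; j < 32; j++)
 *           r[j] = 0;
 *       for (int i = 0; i < 64; i++) {
 *           // All-ones when i == idx, else zero; the assembly builds
 *           // this with vpcmpeqd against a broadcast counter.
 *           sp_digit mask = (sp_digit)0 - (sp_digit)(i == idx);
 *           for (int j = 0; j < 32; j++)
 *               r[j] |= table[i][j] & mask;
 *       }
 *   }
 *
 * The AVX2 code below covers the 32 digits in two passes of four ymm
 * accumulators (digits 0-15, then 16-31).
 */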
#ifndef __APPLE__
.text
.globl sp_2048_get_from_table_avx2_32
.type sp_2048_get_from_table_avx2_32,@function
.align 16
sp_2048_get_from_table_avx2_32:
#else
.section __TEXT,__text
.globl _sp_2048_get_from_table_avx2_32
.p2align 4
_sp_2048_get_from_table_avx2_32:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
vpxor %ymm13, %ymm13, %ymm13
vpermd %ymm10, %ymm13, %ymm10
vpermd %ymm11, %ymm13, %ymm11
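        # Broadcast idx (in %rdx) and the constant 1 to all eight 32-bit
        # lanes: vpermd with a zero index vector replicates lane 0. ymm13
        # is the running entry counter, stepped by ymm11 and compared
        # against ymm10 for each entry.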
# START: 0-15
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 16
movq 128(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 17
movq 136(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 18
movq 144(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 19
movq 152(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 20
movq 160(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 21
movq 168(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 22
movq 176(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 23
movq 184(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 24
movq 192(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 25
movq 200(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 26
movq 208(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 27
movq 216(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 28
movq 224(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 29
movq 232(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 30
movq 240(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 31
movq 248(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 32
movq 256(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 33
movq 264(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 34
movq 272(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 35
movq 280(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 36
movq 288(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 37
movq 296(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 38
movq 304(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 39
movq 312(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 40
movq 320(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 41
movq 328(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 42
movq 336(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 43
movq 344(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 44
movq 352(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 45
movq 360(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 46
movq 368(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 47
movq 376(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 48
movq 384(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 49
movq 392(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 50
movq 400(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 51
movq 408(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 52
movq 416(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 53
movq 424(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 54
movq 432(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 55
movq 440(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 56
movq 448(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 57
movq 456(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 58
movq 464(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 59
movq 472(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 60
movq 480(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 61
movq 488(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 62
movq 496(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 63
movq 504(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 0-15
# START: 16-31
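        # Second pass: accumulate digits 16-31 the same way, with each
        # table entry pointer advanced 0x80 bytes past the digits handled
        # in the first pass.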
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 32
movq 256(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 33
movq 264(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 34
movq 272(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 35
movq 280(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 36
movq 288(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 37
movq 296(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 38
movq 304(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 39
movq 312(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 40
movq 320(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 41
movq 328(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 42
movq 336(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 43
movq 344(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 44
movq 352(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 45
movq 360(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 46
movq 368(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 47
movq 376(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 48
movq 384(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 49
movq 392(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 50
movq 400(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 51
movq 408(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 52
movq 416(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 53
movq 424(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 54
movq 432(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 55
movq 440(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 56
movq 448(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 57
movq 456(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 58
movq 464(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 59
movq 472(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 60
movq 480(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 61
movq 488(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 62
movq 496(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 63
movq 504(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
# END: 16-31
repz retq
#ifndef __APPLE__
.size sp_2048_get_from_table_avx2_32,.-sp_2048_get_from_table_avx2_32
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Conditionally add a and b using the mask m.
 * m is -1 (all ones) to add, or 0 to pass a through unchanged.
 *
 * r A single precision number representing the conditional add result.
 * a A single precision number to add with.
 * b A single precision number to add.
 * m Mask value to apply.
 */
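/* Reference sketch (annotation only, not part of the generated code): a
 * plain-C equivalent of the conditional add below, assuming 64-bit limbs
 * and a compiler with unsigned __int128; names are illustrative.
 *
 *   static uint64_t sp_2048_cond_add_16_ref(uint64_t* r, const uint64_t* a,
 *                                           const uint64_t* b, uint64_t m)
 *   {
 *       uint64_t c = 0;
 *       for (int i = 0; i < 16; i++) {
 *           unsigned __int128 t = (unsigned __int128)a[i] + (b[i] & m) + c;
 *           r[i] = (uint64_t)t;
 *           c = (uint64_t)(t >> 64);
 *       }
 *       return c;  // carry out, as the asm returns in %rax
 *   }
 *
 * The asm first builds the masked copy of b in a 0x80-byte stack buffer,
 * then runs a single addq/adcq chain over it.
 */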
#ifndef __APPLE__
.text
.globl sp_2048_cond_add_16
.type sp_2048_cond_add_16,@function
.align 16
sp_2048_cond_add_16:
#else
.section __TEXT,__text
.globl _sp_2048_cond_add_16
.p2align 4
_sp_2048_cond_add_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq $0x00, %rax
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
addq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
adcq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
adcq $0x00, %rax
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_2048_cond_add_16,.-sp_2048_cond_add_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Conditionally add a and b using the mask m.
 * m is -1 (all ones) to add, or 0 to pass a through unchanged.
 *
 * r A single precision number representing the conditional add result.
 * a A single precision number to add with.
 * b A single precision number to add.
 * m Mask value to apply.
 */
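/* Note: this variant uses BMI2 pextq in place of andq. With the mask in
 * %rcx being either all ones or all zeros, pextq %rcx, x, x yields x or 0
 * respectively, i.e. x & m, so no masked copy of b needs to be staged on
 * the stack as in the non-BMI2 version above. */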
#ifndef __APPLE__
.text
.globl sp_2048_cond_add_avx2_16
.type sp_2048_cond_add_avx2_16,@function
.align 16
sp_2048_cond_add_avx2_16:
#else
.section __TEXT,__text
.globl _sp_2048_cond_add_avx2_16
.p2align 4
_sp_2048_cond_add_avx2_16:
#endif /* __APPLE__ */
movq $0x00, %rax
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
addq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
adcq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
adcq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
adcq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
adcq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
adcq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
adcq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
adcq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
adcq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
adcq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
adcq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
adcq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
adcq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
adcq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
adcq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
adcq %r9, %r8
movq %r8, 120(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_2048_cond_add_avx2_16,.-sp_2048_cond_add_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Shift number left by n bits. (r = a << n)
 *
 * r Result of left shift by n.
 * a Number to shift.
 * n Amount to shift.
 */
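/* Reference sketch (annotation only): a C equivalent of the shift below,
 * assuming 0 < n < 64. Note the result has 33 limbs: the bits shifted
 * out of a[31] land in r[32] (the movq %r10, 256(%rdi) store).
 *
 *   static void sp_2048_lshift_32_ref(uint64_t* r, const uint64_t* a,
 *                                     unsigned n)
 *   {
 *       r[32] = a[31] >> (64 - n);
 *       for (int i = 31; i > 0; i--)
 *           r[i] = (a[i] << n) | (a[i - 1] >> (64 - n));
 *       r[0] = a[0] << n;
 *   }
 */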
#ifndef __APPLE__
.text
.globl sp_2048_lshift_32
.type sp_2048_lshift_32,@function
.align 16
sp_2048_lshift_32:
#else
.section __TEXT,__text
.globl _sp_2048_lshift_32
.p2align 4
_sp_2048_lshift_32:
#endif /* __APPLE__ */
movb %dl, %cl
movq $0x00, %r10
movq 216(%rsi), %r11
movq 224(%rsi), %rdx
movq 232(%rsi), %rax
movq 240(%rsi), %r8
movq 248(%rsi), %r9
shldq %cl, %r9, %r10
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 224(%rdi)
movq %rax, 232(%rdi)
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
movq %r10, 256(%rdi)
movq 184(%rsi), %r9
movq 192(%rsi), %rdx
movq 200(%rsi), %rax
movq 208(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 192(%rdi)
movq %rax, 200(%rdi)
movq %r8, 208(%rdi)
movq %r11, 216(%rdi)
movq 152(%rsi), %r11
movq 160(%rsi), %rdx
movq 168(%rsi), %rax
movq 176(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 160(%rdi)
movq %rax, 168(%rdi)
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
movq 120(%rsi), %r9
movq 128(%rsi), %rdx
movq 136(%rsi), %rax
movq 144(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 128(%rdi)
movq %rax, 136(%rdi)
movq %r8, 144(%rdi)
movq %r11, 152(%rdi)
movq 88(%rsi), %r11
movq 96(%rsi), %rdx
movq 104(%rsi), %rax
movq 112(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 96(%rdi)
movq %rax, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
movq 56(%rsi), %r9
movq 64(%rsi), %rdx
movq 72(%rsi), %rax
movq 80(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 64(%rdi)
movq %rax, 72(%rdi)
movq %r8, 80(%rdi)
movq %r11, 88(%rdi)
movq 24(%rsi), %r11
movq 32(%rsi), %rdx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 32(%rdi)
movq %rax, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shlq %cl, %rdx
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %r8, 16(%rdi)
movq %r11, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_2048_lshift_32,.-sp_2048_lshift_32
#endif /* __APPLE__ */
#endif /* !WOLFSSL_SP_NO_2048 */
#endif /* !WOLFSSL_SP_NO_2048 */
#ifndef WOLFSSL_SP_NO_3072
#ifndef WOLFSSL_SP_NO_3072
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
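/* Reference sketch (annotation only): what this routine computes, in
 * plain C, assuming 64-bit limbs. The output is a fixed 48 limbs (0x180
 * bytes), matching the addq $0x180 bound below; names are illustrative.
 *
 *   static void sp_3072_from_bin_ref(uint64_t* r, const uint8_t* a, int n)
 *   {
 *       int i = n, j = 0, k;
 *       while (i >= 8 && j < 48) {    // whole limbs, least significant first
 *           i -= 8;
 *           r[j] = 0;
 *           for (k = 0; k < 8; k++)
 *               r[j] = (r[j] << 8) | a[i + k];
 *           j++;
 *       }
 *       if (i > 0 && j < 48) {        // partial top limb: leading bytes of a
 *           r[j] = 0;
 *           for (k = 0; k < i; k++)
 *               r[j] = (r[j] << 8) | a[k];
 *           j++;
 *       }
 *       while (j < 48)                // zero-fill the remaining limbs
 *           r[j++] = 0;
 *   }
 */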
#ifndef __APPLE__
.text
.globl sp_3072_from_bin_bswap
.type sp_3072_from_bin_bswap,@function
.align 16
sp_3072_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_3072_from_bin_bswap
.p2align 4
_sp_3072_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x180, %r10
xorq %r11, %r11
jmp L_3072_from_bin_bswap_64_end
L_3072_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_3072_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_3072_from_bin_bswap_64_start
jmp L_3072_from_bin_bswap_8_end
L_3072_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_3072_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_3072_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_3072_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_3072_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_3072_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_3072_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_3072_from_bin_bswap_zero_end
L_3072_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_3072_from_bin_bswap_zero_start
L_3072_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_3072_from_bin_bswap,.-sp_3072_from_bin_bswap
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
 * Uses the movbe instruction, which is optional (not present on all CPUs).
 *
 * r A single precision integer.
 * size Maximum number of bytes to convert.
 * a Byte array.
 * n Number of bytes in array to read.
 */
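/* Note: identical in effect to sp_3072_from_bin_bswap above, except each
 * movq+bswapq pair is fused into a single movbeq, which loads and
 * byte-swaps in one instruction on CPUs that support MOVBE. */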
#ifndef __APPLE__
.text
.globl sp_3072_from_bin_movbe
.type sp_3072_from_bin_movbe,@function
.align 16
sp_3072_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_3072_from_bin_movbe
.p2align 4
_sp_3072_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x180, %r10
jmp L_3072_from_bin_movbe_64_end
L_3072_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_3072_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_3072_from_bin_movbe_64_start
jmp L_3072_from_bin_movbe_8_end
L_3072_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_3072_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_3072_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_3072_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_3072_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_3072_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_3072_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_3072_from_bin_movbe_zero_end
L_3072_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_3072_from_bin_movbe_zero_start
L_3072_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_3072_from_bin_movbe,.-sp_3072_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 384
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
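/* Reference sketch (annotation only): a C equivalent of the fixed-size
 * big-endian store. Limb r[47] supplies the first (most significant)
 * eight output bytes and r[0] the last eight.
 *
 *   static void sp_3072_to_bin_ref(const uint64_t* r, uint8_t* a)
 *   {
 *       for (int i = 0; i < 48; i++) {
 *           uint64_t d = r[47 - i];
 *           for (int k = 0; k < 8; k++)
 *               a[8 * i + k] = (uint8_t)(d >> (56 - 8 * k));
 *       }
 *   }
 */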
#ifndef __APPLE__
.text
.globl sp_3072_to_bin_bswap_48
.type sp_3072_to_bin_bswap_48,@function
.align 16
sp_3072_to_bin_bswap_48:
#else
.section __TEXT,__text
.globl _sp_3072_to_bin_bswap_48
.p2align 4
_sp_3072_to_bin_bswap_48:
#endif /* __APPLE__ */
movq 376(%rdi), %rdx
movq 368(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movq 360(%rdi), %rdx
movq 352(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movq 344(%rdi), %rdx
movq 336(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movq 328(%rdi), %rdx
movq 320(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movq 312(%rdi), %rdx
movq 304(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movq 296(%rdi), %rdx
movq 288(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movq 280(%rdi), %rdx
movq 272(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movq 264(%rdi), %rdx
movq 256(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movq 248(%rdi), %rdx
movq 240(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movq 232(%rdi), %rdx
movq 224(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movq 216(%rdi), %rdx
movq 208(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movq 200(%rdi), %rdx
movq 192(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movq 184(%rdi), %rdx
movq 176(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movq 168(%rdi), %rdx
movq 160(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movq 152(%rdi), %rdx
movq 144(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movq 136(%rdi), %rdx
movq 128(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
movq 120(%rdi), %rdx
movq 112(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 256(%rsi)
movq %rax, 264(%rsi)
movq 104(%rdi), %rdx
movq 96(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 272(%rsi)
movq %rax, 280(%rsi)
movq 88(%rdi), %rdx
movq 80(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 288(%rsi)
movq %rax, 296(%rsi)
movq 72(%rdi), %rdx
movq 64(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 304(%rsi)
movq %rax, 312(%rsi)
movq 56(%rdi), %rdx
movq 48(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 320(%rsi)
movq %rax, 328(%rsi)
movq 40(%rdi), %rdx
movq 32(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 336(%rsi)
movq %rax, 344(%rsi)
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 352(%rsi)
movq %rax, 360(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 368(%rsi)
movq %rax, 376(%rsi)
repz retq
#ifndef __APPLE__
.size sp_3072_to_bin_bswap_48,.-sp_3072_to_bin_bswap_48
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 384
 * Uses the movbe instruction, which is optional.
*
* r A single precision integer.
* a Byte array.
*/
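/* Note: as with the from_bin pair, this is the bswap variant above with
 * each bswapq folded into the movbeq load; the big-endian byte order is
 * produced by the load rather than by a separate swap. */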
#ifndef __APPLE__
.text
.globl sp_3072_to_bin_movbe_48
.type sp_3072_to_bin_movbe_48,@function
.align 16
sp_3072_to_bin_movbe_48:
#else
.section __TEXT,__text
.globl _sp_3072_to_bin_movbe_48
.p2align 4
_sp_3072_to_bin_movbe_48:
#endif /* __APPLE__ */
movbeq 376(%rdi), %rdx
movbeq 368(%rdi), %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movbeq 360(%rdi), %rdx
movbeq 352(%rdi), %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movbeq 344(%rdi), %rdx
movbeq 336(%rdi), %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movbeq 328(%rdi), %rdx
movbeq 320(%rdi), %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movbeq 312(%rdi), %rdx
movbeq 304(%rdi), %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movbeq 296(%rdi), %rdx
movbeq 288(%rdi), %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movbeq 280(%rdi), %rdx
movbeq 272(%rdi), %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movbeq 264(%rdi), %rdx
movbeq 256(%rdi), %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movbeq 248(%rdi), %rdx
movbeq 240(%rdi), %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movbeq 232(%rdi), %rdx
movbeq 224(%rdi), %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movbeq 216(%rdi), %rdx
movbeq 208(%rdi), %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movbeq 200(%rdi), %rdx
movbeq 192(%rdi), %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movbeq 184(%rdi), %rdx
movbeq 176(%rdi), %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movbeq 168(%rdi), %rdx
movbeq 160(%rdi), %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movbeq 152(%rdi), %rdx
movbeq 144(%rdi), %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movbeq 136(%rdi), %rdx
movbeq 128(%rdi), %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
movbeq 120(%rdi), %rdx
movbeq 112(%rdi), %rax
movq %rdx, 256(%rsi)
movq %rax, 264(%rsi)
movbeq 104(%rdi), %rdx
movbeq 96(%rdi), %rax
movq %rdx, 272(%rsi)
movq %rax, 280(%rsi)
movbeq 88(%rdi), %rdx
movbeq 80(%rdi), %rax
movq %rdx, 288(%rsi)
movq %rax, 296(%rsi)
movbeq 72(%rdi), %rdx
movbeq 64(%rdi), %rax
movq %rdx, 304(%rsi)
movq %rax, 312(%rsi)
movbeq 56(%rdi), %rdx
movbeq 48(%rdi), %rax
movq %rdx, 320(%rsi)
movq %rax, 328(%rsi)
movbeq 40(%rdi), %rdx
movbeq 32(%rdi), %rax
movq %rdx, 336(%rsi)
movq %rax, 344(%rsi)
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, 352(%rsi)
movq %rax, 360(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 368(%rsi)
movq %rax, 376(%rsi)
repz retq
#ifndef __APPLE__
.size sp_3072_to_bin_movbe_48,.-sp_3072_to_bin_movbe_48
#endif /* __APPLE__ */
#endif /* NO_MOVBE_SUPPORT */
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
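/* Reference sketch (annotation only): the code below computes the full
 * 24-limb product by product scanning (one output column at a time,
 * rotating the accumulator through %r8/%r9/%r10), buffering the low 12
 * result limbs on the stack and copying them out last, presumably so r
 * may overlap an input. An operand-scanning C equivalent, assuming
 * 64-bit limbs and unsigned __int128:
 *
 *   static void sp_3072_mul_12_ref(uint64_t* r, const uint64_t* a,
 *                                  const uint64_t* b)
 *   {
 *       uint64_t t[24] = {0};
 *       for (int i = 0; i < 12; i++) {
 *           uint64_t c = 0;
 *           for (int j = 0; j < 12; j++) {
 *               unsigned __int128 p = (unsigned __int128)a[i] * b[j]
 *                                   + t[i + j] + c;
 *               t[i + j] = (uint64_t)p;   // low word into column i+j
 *               c = (uint64_t)(p >> 64);  // high word carries onward
 *           }
 *           t[i + 12] = c;
 *       }
 *       for (int k = 0; k < 24; k++)
 *           r[k] = t[k];
 *   }
 */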
#ifndef __APPLE__
.text
.globl sp_3072_mul_12
.type sp_3072_mul_12,@function
.align 16
sp_3072_mul_12:
#else
.section __TEXT,__text
.globl _sp_3072_mul_12
.p2align 4
_sp_3072_mul_12:
#endif /* __APPLE__ */
movq %rdx, %rcx
subq $0x60, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rsp)
# A[0] * B[6]
movq 48(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[0]
movq (%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 48(%rsp)
# A[0] * B[7]
movq 56(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[6]
movq 48(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[1]
movq 8(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[0]
movq (%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 56(%rsp)
# A[0] * B[8]
movq 64(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[7]
movq 56(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[6]
movq 48(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[2]
movq 16(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[1]
movq 8(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[0]
movq (%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 64(%rsp)
# A[0] * B[9]
movq 72(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[8]
movq 64(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[7]
movq 56(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[6]
movq 48(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[3]
movq 24(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[2]
movq 16(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[1]
movq 8(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[0]
movq (%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 72(%rsp)
# A[0] * B[10]
movq 80(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[9]
movq 72(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[8]
movq 64(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[7]
movq 56(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[6]
movq 48(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[4]
movq 32(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[3]
movq 24(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[2]
movq 16(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[1]
movq 8(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[0]
movq (%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 80(%rsp)
# A[0] * B[11]
movq 88(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[10]
movq 80(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[9]
movq 72(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[8]
movq 64(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[7]
movq 56(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[6]
movq 48(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[5]
movq 40(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[4]
movq 32(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[3]
movq 24(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[2]
movq 16(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[1]
movq 8(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[0]
movq (%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 88(%rsp)
# A[1] * B[11]
movq 88(%rcx), %rax
mulq 8(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[10]
movq 80(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[9]
movq 72(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[8]
movq 64(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[7]
movq 56(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[6]
movq 48(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[5]
movq 40(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[4]
movq 32(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[3]
movq 24(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[2]
movq 16(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[1]
movq 8(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 96(%rdi)
# A[2] * B[11]
movq 88(%rcx), %rax
mulq 16(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[10]
movq 80(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[9]
movq 72(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[8]
movq 64(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[7]
movq 56(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[6]
movq 48(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[5]
movq 40(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[4]
movq 32(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[3]
movq 24(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[2]
movq 16(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 104(%rdi)
# A[3] * B[11]
movq 88(%rcx), %rax
mulq 24(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[10]
movq 80(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[9]
movq 72(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[8]
movq 64(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[7]
movq 56(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[6]
movq 48(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[5]
movq 40(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[4]
movq 32(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[3]
movq 24(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 112(%rdi)
# A[4] * B[11]
movq 88(%rcx), %rax
mulq 32(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[10]
movq 80(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[9]
movq 72(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[8]
movq 64(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[7]
movq 56(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[6]
movq 48(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[5]
movq 40(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[4]
movq 32(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 120(%rdi)
# A[5] * B[11]
movq 88(%rcx), %rax
mulq 40(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[10]
movq 80(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[9]
movq 72(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[8]
movq 64(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[7]
movq 56(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[6]
movq 48(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[5]
movq 40(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 128(%rdi)
# A[6] * B[11]
movq 88(%rcx), %rax
mulq 48(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[10]
movq 80(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[9]
movq 72(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[8]
movq 64(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[7]
movq 56(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[6]
movq 48(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 136(%rdi)
# A[7] * B[11]
movq 88(%rcx), %rax
mulq 56(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[10]
movq 80(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[9]
movq 72(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[8]
movq 64(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[7]
movq 56(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 144(%rdi)
# A[8] * B[11]
movq 88(%rcx), %rax
mulq 64(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[10]
movq 80(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[9]
movq 72(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[8]
movq 64(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 152(%rdi)
# A[9] * B[11]
movq 88(%rcx), %rax
mulq 72(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[10]
movq 80(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[9]
movq 72(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 160(%rdi)
# A[10] * B[11]
movq 88(%rcx), %rax
mulq 80(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[10]
movq 80(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 168(%rdi)
# A[11] * B[11]
movq 88(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq %r9, 176(%rdi)
movq %r10, 184(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r8
movq 56(%rsp), %r9
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r8
movq 88(%rsp), %r9
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
addq $0x60, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_mul_12,.-sp_3072_mul_12
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
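/* Note: this variant keeps two independent carry chains alive at once:
 * mulx forms each 64x64->128 product without touching the flags, adcx
 * propagates one chain through CF and adox the other through OF, so
 * adjacent partial products are added without serializing on a single
 * carry flag. %r12 is held at zero and used as a carry sink. */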
#ifndef __APPLE__
.text
.globl sp_3072_mul_avx2_12
.type sp_3072_mul_avx2_12,@function
.align 16
sp_3072_mul_avx2_12:
#else
.section __TEXT,__text
.globl _sp_3072_mul_avx2_12
.p2align 4
_sp_3072_mul_avx2_12:
#endif /* __APPLE__ */
pushq %rbx
pushq %rbp
pushq %r12
movq %rdx, %rbp
subq $0x60, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbx
cmovne %rdi, %rbx
cmpq %rdi, %rbp
cmove %rsp, %rbx
addq $0x60, %rdi
xorq %r12, %r12
movq (%rsi), %rdx
# A[0] * B[0]
mulx (%rbp), %r8, %r9
# A[0] * B[1]
mulx 8(%rbp), %rax, %r10
movq %r8, (%rbx)
adcxq %rax, %r9
movq %r9, 8(%rbx)
# A[0] * B[2]
mulx 16(%rbp), %rax, %r8
adcxq %rax, %r10
# A[0] * B[3]
mulx 24(%rbp), %rax, %r9
movq %r10, 16(%rbx)
adcxq %rax, %r8
movq %r8, 24(%rbx)
# A[0] * B[4]
mulx 32(%rbp), %rax, %r10
adcxq %rax, %r9
# A[0] * B[5]
mulx 40(%rbp), %rax, %r8
movq %r9, 32(%rbx)
adcxq %rax, %r10
movq %r10, 40(%rbx)
# A[0] * B[6]
mulx 48(%rbp), %rax, %r9
adcxq %rax, %r8
# A[0] * B[7]
mulx 56(%rbp), %rax, %r10
movq %r8, 48(%rbx)
adcxq %rax, %r9
movq %r9, 56(%rbx)
# A[0] * B[8]
mulx 64(%rbp), %rax, %r8
adcxq %rax, %r10
# A[0] * B[9]
mulx 72(%rbp), %rax, %r9
movq %r10, 64(%rbx)
adcxq %rax, %r8
movq %r8, 72(%rbx)
# A[0] * B[10]
mulx 80(%rbp), %rax, %r10
adcxq %rax, %r9
# A[0] * B[11]
mulx 88(%rbp), %rax, %r8
movq %r9, 80(%rbx)
adcxq %rax, %r10
adcxq %r12, %r8
movq %r12, %r11
adcxq %r12, %r11
movq %r10, 88(%rbx)
movq %r8, (%rdi)
movq 8(%rsi), %rdx
movq 8(%rbx), %r9
movq 16(%rbx), %r10
movq 24(%rbx), %r8
# A[1] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 8(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 16(%rbx)
movq 32(%rbx), %r9
movq 40(%rbx), %r10
# A[1] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 24(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rbx)
movq 48(%rbx), %r8
movq 56(%rbx), %r9
# A[1] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[1] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 40(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rbx)
movq 64(%rbx), %r10
movq 72(%rbx), %r8
# A[1] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 56(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 64(%rbx)
movq 80(%rbx), %r9
movq 88(%rbx), %r10
# A[1] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 72(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rbx)
movq (%rdi), %r8
# A[1] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[1] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
movq %r12, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r11, %r9
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rsi), %rdx
movq 16(%rbx), %r10
movq 24(%rbx), %r8
movq 32(%rbx), %r9
# A[2] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[2] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rbx)
movq 40(%rbx), %r10
movq 48(%rbx), %r8
# A[2] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 32(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 40(%rbx)
movq 56(%rbx), %r9
movq 64(%rbx), %r10
# A[2] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 48(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 56(%rbx)
movq 72(%rbx), %r8
movq 80(%rbx), %r9
# A[2] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[2] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 64(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rbx)
movq 88(%rbx), %r10
movq (%rdi), %r8
# A[2] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 80(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 88(%rbx)
movq 8(%rdi), %r9
# A[2] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, (%rdi)
movq %r12, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r11, %r10
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq 24(%rsi), %rdx
movq 24(%rbx), %r8
movq 32(%rbx), %r9
movq 40(%rbx), %r10
# A[3] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 24(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rbx)
movq 48(%rbx), %r8
movq 56(%rbx), %r9
# A[3] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[3] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 40(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rbx)
movq 64(%rbx), %r10
movq 72(%rbx), %r8
# A[3] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 56(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 64(%rbx)
movq 80(%rbx), %r9
movq 88(%rbx), %r10
# A[3] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 72(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rbx)
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[3] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[3] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r10
# A[3] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 8(%rdi)
movq %r12, %r8
adcxq %rax, %r10
adoxq %rcx, %r8
adcxq %r11, %r8
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r10, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rsi), %rdx
movq 32(%rbx), %r9
movq 40(%rbx), %r10
movq 48(%rbx), %r8
# A[4] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 32(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 40(%rbx)
movq 56(%rbx), %r9
movq 64(%rbx), %r10
# A[4] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 48(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 56(%rbx)
movq 72(%rbx), %r8
movq 80(%rbx), %r9
# A[4] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[4] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 64(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rbx)
movq 88(%rbx), %r10
movq (%rdi), %r8
# A[4] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 80(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 88(%rbx)
movq 8(%rdi), %r9
movq 16(%rdi), %r10
# A[4] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, (%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 8(%rdi)
movq 24(%rdi), %r8
# A[4] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[4] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 16(%rdi)
movq %r12, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r11, %r9
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq 40(%rsi), %rdx
movq 40(%rbx), %r10
movq 48(%rbx), %r8
movq 56(%rbx), %r9
# A[5] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[5] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 40(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rbx)
movq 64(%rbx), %r10
movq 72(%rbx), %r8
# A[5] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 56(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 64(%rbx)
movq 80(%rbx), %r9
movq 88(%rbx), %r10
# A[5] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 72(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rbx)
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[5] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[5] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[5] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 8(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
# A[5] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
movq %r12, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r11, %r10
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq 48(%rsi), %rdx
movq 48(%rbx), %r8
movq 56(%rbx), %r9
movq 64(%rbx), %r10
# A[6] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 48(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 56(%rbx)
movq 72(%rbx), %r8
movq 80(%rbx), %r9
# A[6] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[6] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 64(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rbx)
movq 88(%rbx), %r10
movq (%rdi), %r8
# A[6] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 80(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 88(%rbx)
movq 8(%rdi), %r9
movq 16(%rdi), %r10
# A[6] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, (%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 8(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[6] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[6] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r10
# A[6] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 32(%rdi)
movq %r12, %r8
adcxq %rax, %r10
adoxq %rcx, %r8
adcxq %r11, %r8
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r10, 40(%rdi)
movq %r8, 48(%rdi)
movq 56(%rsi), %rdx
movq 56(%rbx), %r9
movq 64(%rbx), %r10
movq 72(%rbx), %r8
# A[7] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 56(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 64(%rbx)
movq 80(%rbx), %r9
movq 88(%rbx), %r10
# A[7] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 72(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rbx)
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[7] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[7] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[7] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 8(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[7] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rdi)
movq 48(%rdi), %r8
# A[7] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[7] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 40(%rdi)
movq %r12, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r11, %r9
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsi), %rdx
movq 64(%rbx), %r10
movq 72(%rbx), %r8
movq 80(%rbx), %r9
# A[8] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[8] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 64(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rbx)
movq 88(%rbx), %r10
movq (%rdi), %r8
# A[8] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 80(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 88(%rbx)
movq 8(%rdi), %r9
movq 16(%rdi), %r10
# A[8] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, (%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 8(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[8] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[8] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r10
movq 48(%rdi), %r8
# A[8] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 32(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 40(%rdi)
movq 56(%rdi), %r9
# A[8] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 48(%rdi)
movq %r12, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r11, %r10
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r9, 56(%rdi)
movq %r10, 64(%rdi)
movq 72(%rsi), %rdx
movq 72(%rbx), %r8
movq 80(%rbx), %r9
movq 88(%rbx), %r10
# A[9] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 72(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rbx)
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[9] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[9] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[9] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 8(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[9] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[9] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[9] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 40(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rdi)
movq 64(%rdi), %r10
# A[9] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 56(%rdi)
movq %r12, %r8
adcxq %rax, %r10
adoxq %rcx, %r8
adcxq %r11, %r8
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r10, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rsi), %rdx
movq 80(%rbx), %r9
movq 88(%rbx), %r10
movq (%rdi), %r8
# A[10] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 80(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 88(%rbx)
movq 8(%rdi), %r9
movq 16(%rdi), %r10
# A[10] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, (%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 8(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[10] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[10] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r10
movq 48(%rdi), %r8
# A[10] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 32(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 40(%rdi)
movq 56(%rdi), %r9
movq 64(%rdi), %r10
# A[10] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 48(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 56(%rdi)
movq 72(%rdi), %r8
# A[10] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[10] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 64(%rdi)
movq %r12, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r11, %r9
movq %r12, %r11
adoxq %r12, %r11
adcxq %r12, %r11
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq 88(%rsi), %rdx
movq 88(%rbx), %r10
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[11] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[11] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 88(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[11] * B[2]
mulx 16(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 8(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[11] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[11] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[11] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 40(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rdi)
movq 64(%rdi), %r10
movq 72(%rdi), %r8
# A[11] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 56(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r10, 64(%rdi)
movq 80(%rdi), %r9
# A[11] * B[10]
mulx 80(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
movq %r12, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r11, %r10
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
subq $0x60, %rdi
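    # %rdi has just been rewound from the high half of the result to its
    # base.  If r aliases a (%rsi) or b (%rbp), the low 12 result words
    # were accumulated in the stack buffer addressed by %rbx rather than
    # in r, so they are copied out below with 16-byte moves; if r overlaps
    # neither input, the copy is skipped.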
cmpq %rdi, %rsi
je L_start_3072_mul_avx2_12
cmpq %rdi, %rbp
jne L_end_3072_mul_avx2_12
L_start_3072_mul_avx2_12:
vmovdqu (%rbx), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbx), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbx), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbx), %xmm0
vmovups %xmm0, 48(%rdi)
vmovdqu 64(%rbx), %xmm0
vmovups %xmm0, 64(%rdi)
vmovdqu 80(%rbx), %xmm0
vmovups %xmm0, 80(%rdi)
L_end_3072_mul_avx2_12:
addq $0x60, %rsp
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_3072_mul_avx2_12,.-sp_3072_mul_avx2_12
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
 *
 * r A single precision integer, 12 64-bit words.
 * a A single precision integer.
 * b A single precision integer.
 *
 * Returns the carry out of the top word (0 or 1) in %rax.
 */
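/* C-level equivalent (a minimal sketch, not the generated source; sp_digit
 * is assumed to be the callers' 64-bit limb type, which this file does not
 * define):
 *
 *   sp_digit sp_3072_add_12(sp_digit* r, const sp_digit* a,
 *                           const sp_digit* b)
 *   {
 *       sp_digit c = 0;
 *       for (int i = 0; i < 12; i++) {
 *           r[i] = a[i] + b[i] + c;
 *           // recover the carry out of the 64-bit add
 *           c = (r[i] < a[i]) || (c && r[i] == a[i]);
 *       }
 *       return c;   // matches the final adcq $0x00, %rax below
 *   }
 */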
#ifndef __APPLE__
.text
.globl sp_3072_add_12
.type sp_3072_add_12,@function
.align 16
sp_3072_add_12:
#else
.section __TEXT,__text
.globl _sp_3072_add_12
.p2align 4
_sp_3072_add_12:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq %r8, 88(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_add_12,.-sp_3072_add_12
#endif /* __APPLE__ */
/* Sub b from a into a. (a -= b)
 *
 * a A single precision integer and result, 24 64-bit words.
 * b A single precision integer.
 *
 * Returns the borrow in %rax: 0 when no borrow, all ones when a < b
 * (the final sbbq %rax, %rax turns CF into a full-width mask).
 */
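/* C-level equivalent (a minimal sketch under the same sp_digit assumption
 * as above):
 *
 *   sp_digit sp_3072_sub_in_place_24(sp_digit* a, const sp_digit* b)
 *   {
 *       sp_digit borrow = 0;
 *       for (int i = 0; i < 24; i++) {
 *           sp_digit t = a[i] - b[i] - borrow;
 *           // borrow out of the 64-bit subtract
 *           borrow = (a[i] < b[i]) || (borrow && a[i] == b[i]);
 *           a[i] = t;
 *       }
 *       return (sp_digit)0 - borrow;   // 0 or all ones, as sbbq %rax,%rax
 *   }
 */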
#ifndef __APPLE__
.text
.globl sp_3072_sub_in_place_24
.type sp_3072_sub_in_place_24,@function
.align 16
sp_3072_sub_in_place_24:
#else
.section __TEXT,__text
.globl _sp_3072_sub_in_place_24
.p2align 4
_sp_3072_sub_in_place_24:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq 128(%rdi), %rdx
movq %rcx, 120(%rdi)
sbbq 128(%rsi), %rdx
movq 136(%rdi), %rcx
movq %rdx, 128(%rdi)
sbbq 136(%rsi), %rcx
movq 144(%rdi), %rdx
movq %rcx, 136(%rdi)
sbbq 144(%rsi), %rdx
movq 152(%rdi), %rcx
movq %rdx, 144(%rdi)
sbbq 152(%rsi), %rcx
movq 160(%rdi), %rdx
movq %rcx, 152(%rdi)
sbbq 160(%rsi), %rdx
movq 168(%rdi), %rcx
movq %rdx, 160(%rdi)
sbbq 168(%rsi), %rcx
movq 176(%rdi), %rdx
movq %rcx, 168(%rdi)
sbbq 176(%rsi), %rdx
movq 184(%rdi), %rcx
movq %rdx, 176(%rdi)
sbbq 184(%rsi), %rcx
movq %rcx, 184(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_sub_in_place_24,.-sp_3072_sub_in_place_24
#endif /* __APPLE__ */
/* Add b to a into r. (r = a + b)
 *
 * r A single precision integer, 24 64-bit words.
 * a A single precision integer.
 * b A single precision integer.
 *
 * Returns the carry out of the top word (0 or 1) in %rax.
 */
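/* As in sp_3072_add_12, the carry lives in CF across the whole sweep: the
 * interleaved movq loads and stores do not modify EFLAGS, so they overlap
 * memory latency without breaking the adcq chain.
 */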
#ifndef __APPLE__
.text
.globl sp_3072_add_24
.type sp_3072_add_24,@function
.align 16
sp_3072_add_24:
#else
.section __TEXT,__text
.globl _sp_3072_add_24
.p2align 4
_sp_3072_add_24:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
adcq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
adcq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
adcq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
adcq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
adcq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
adcq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
adcq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
adcq 184(%rdx), %r8
movq %r8, 184(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_add_24,.-sp_3072_add_24
#endif /* __APPLE__ */
/* Multiply a and b into r. (r = a * b)
 *
 * r A single precision integer, 48 64-bit words.
 * a A single precision integer, 24 64-bit words.
 * b A single precision integer, 24 64-bit words.
 *
 * One level of Karatsuba; see the sketch below.
 */
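/* High-level sketch of the Karatsuba step (illustrative pseudo-C only; the
 * half-size products are real calls to sp_3072_mul_12, everything else is
 * inlined below):
 *
 *   // a = a1*2^768 + a0, b = b1*2^768 + b0, halves of 12 words each
 *   u = a0 + a1    (12 words, carry out ca)
 *   v = b0 + b1    (12 words, carry out cb)
 *   z1 = u * v     (24-word product; the carries are fixed up separately)
 *   z2 = a1 * b1
 *   z0 = a0 * b0   (written straight into r)
 *   r += ((cb ? u : 0) + (ca ? v : 0)) * 2^1536 + (ca & cb) * 2^2304
 *   r += (z1 - z0 - z2) * 2^768
 *   r += z2 * 2^1536
 *
 * The conditional terms are realised with -ca/-cb masks and andq, so no
 * data-dependent branch is taken.
 */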
#ifndef __APPLE__
.text
.globl sp_3072_mul_24
.type sp_3072_mul_24,@function
.align 16
sp_3072_mul_24:
#else
.section __TEXT,__text
.globl _sp_3072_mul_24
.p2align 4
_sp_3072_mul_24:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x268, %rsp
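    # Stack frame layout (0x268 bytes):
    #     0(%rsp)  z1 = u * v    (24 words)
    #   192(%rsp)  z2 = a1 * b1  (24 words)
    #   384(%rsp)  u  = a0 + a1  (12 words)
    #   480(%rsp)  v  = b0 + b1  (12 words)
    #   576(%rsp)  saved r, a, b, then the carries ca and cb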
movq %rdi, 576(%rsp)
movq %rsi, 584(%rsp)
movq %rdx, 592(%rsp)
leaq 384(%rsp), %r10
leaq 96(%rsi), %r12
# Add
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq %r8, 88(%r10)
adcq $0x00, %r13
movq %r13, 600(%rsp)
leaq 480(%rsp), %r11
leaq 96(%rdx), %r12
# Add
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq %r8, 88(%r11)
adcq $0x00, %r14
movq %r14, 608(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_mul_12@plt
#else
callq _sp_3072_mul_12
#endif /* __APPLE__ */
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
leaq 192(%rsp), %rdi
addq $0x60, %rdx
addq $0x60, %rsi
#ifndef __APPLE__
callq sp_3072_mul_12@plt
#else
callq _sp_3072_mul_12
#endif /* __APPLE__ */
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
movq 576(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_mul_12@plt
#else
callq _sp_3072_mul_12
#endif /* __APPLE__ */
#ifdef _WIN64
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
movq 576(%rsp), %rdi
#endif /* _WIN64 */
movq 600(%rsp), %r13
movq 608(%rsp), %r14
movq 576(%rsp), %r15
movq %r13, %r9
leaq 384(%rsp), %r10
leaq 480(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0xc0, %r15
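    # Constant-time carry fix-up: mask u (at %r10) with -cb (%r14) and v
    # (at %r11) with -ca (%r13), add the masked halves into r[24..35] at
    # %r15, and keep ca & cb plus the carry out in %r9 for the word above.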
movq (%r10), %rax
movq (%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, (%r10)
movq %rcx, (%r11)
movq 8(%r10), %rax
movq 8(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 8(%r10)
movq %rcx, 8(%r11)
movq 16(%r10), %rax
movq 16(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 16(%r10)
movq %rcx, 16(%r11)
movq 24(%r10), %rax
movq 24(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 24(%r10)
movq %rcx, 24(%r11)
movq 32(%r10), %rax
movq 32(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 32(%r10)
movq %rcx, 32(%r11)
movq 40(%r10), %rax
movq 40(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 40(%r10)
movq %rcx, 40(%r11)
movq 48(%r10), %rax
movq 48(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 48(%r10)
movq %rcx, 48(%r11)
movq 56(%r10), %rax
movq 56(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 56(%r10)
movq %rcx, 56(%r11)
movq 64(%r10), %rax
movq 64(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 64(%r10)
movq %rcx, 64(%r11)
movq 72(%r10), %rax
movq 72(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 72(%r10)
movq %rcx, 72(%r11)
movq 80(%r10), %rax
movq 80(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 80(%r10)
movq %rcx, 80(%r11)
movq 88(%r10), %rax
movq 88(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 88(%r10)
movq %rcx, 88(%r11)
movq (%r10), %rax
addq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq %r8, 88(%r15)
adcq $0x00, %r9
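    # Form the middle term: z1 -= z2 (z2 at 192(%rsp)), then z1 -= z0 (z0
    # already in r at %rdi), propagating the borrows out of the fix-up
    # word in %r9.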
leaq 192(%rsp), %r11
movq %rsp, %r10
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq %r8, 184(%r10)
sbbq $0x00, %r9
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq %r8, 184(%r10)
sbbq $0x00, %r9
subq $0x60, %r15
# Add
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq %r8, 184(%r15)
adcq $0x00, %r9
movq %r9, 288(%rdi)
addq $0x60, %r15
# Add
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq %rax, 96(%r15)
# Add to zero
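    # (propagate the final carry through the top words of z2 while copying
    #  them into the top of the result; nothing remains to add from z1)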
movq 104(%r11), %rax
adcq $0x00, %rax
movq 112(%r11), %rcx
movq %rax, 104(%r15)
adcq $0x00, %rcx
movq 120(%r11), %r8
movq %rcx, 112(%r15)
adcq $0x00, %r8
movq 128(%r11), %rax
movq %r8, 120(%r15)
adcq $0x00, %rax
movq 136(%r11), %rcx
movq %rax, 128(%r15)
adcq $0x00, %rcx
movq 144(%r11), %r8
movq %rcx, 136(%r15)
adcq $0x00, %r8
movq 152(%r11), %rax
movq %r8, 144(%r15)
adcq $0x00, %rax
movq 160(%r11), %rcx
movq %rax, 152(%r15)
adcq $0x00, %rcx
movq 168(%r11), %r8
movq %rcx, 160(%r15)
adcq $0x00, %r8
movq 176(%r11), %rax
movq %r8, 168(%r15)
adcq $0x00, %rax
movq 184(%r11), %rcx
movq %rax, 176(%r15)
adcq $0x00, %rcx
movq %rcx, 184(%r15)
addq $0x268, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mul_24,.-sp_3072_mul_24
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
 *
 * r A single precision integer, 48 64-bit words.
 * a A single precision integer, 24 64-bit words.
 * b A single precision integer, 24 64-bit words.
 *
 * AVX2/BMI2 variant of sp_3072_mul_24: the same Karatsuba structure, but
 * the half-size products use sp_3072_mul_avx2_12 and the carry fix-up
 * masks with pextq instead of andq.
 */
#ifndef __APPLE__
.text
.globl sp_3072_mul_avx2_24
.type sp_3072_mul_avx2_24,@function
.align 16
sp_3072_mul_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_mul_avx2_24
.p2align 4
_sp_3072_mul_avx2_24:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x268, %rsp
movq %rdi, 576(%rsp)
movq %rsi, 584(%rsp)
movq %rdx, 592(%rsp)
leaq 384(%rsp), %r10
leaq 96(%rsi), %r12
# Add
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq %r8, 88(%r10)
adcq $0x00, %r13
movq %r13, 600(%rsp)
leaq 480(%rsp), %r11
leaq 96(%rdx), %r12
# Add
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq %r8, 88(%r11)
adcq $0x00, %r14
movq %r14, 608(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_mul_avx2_12@plt
#else
callq _sp_3072_mul_avx2_12
#endif /* __APPLE__ */
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
leaq 192(%rsp), %rdi
addq $0x60, %rdx
addq $0x60, %rsi
#ifndef __APPLE__
callq sp_3072_mul_avx2_12@plt
#else
callq _sp_3072_mul_avx2_12
#endif /* __APPLE__ */
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
movq 576(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_mul_avx2_12@plt
#else
callq _sp_3072_mul_avx2_12
#endif /* __APPLE__ */
#ifdef _WIN64
movq 592(%rsp), %rdx
movq 584(%rsp), %rsi
movq 576(%rsp), %rdi
#endif /* _WIN64 */
movq 600(%rsp), %r13
movq 608(%rsp), %r14
movq 576(%rsp), %r15
movq %r13, %r9
leaq 384(%rsp), %r10
leaq 480(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0xc0, %r15
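    # Same constant-time fix-up as in sp_3072_mul_24, but the masking and
    # the add are fused: pextq with an all-ones mask returns its source
    # and with a zero mask returns zero, so the masks in %r13/%r14 keep or
    # clear each word before it enters the adcq chain.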
movq (%r10), %rax
movq (%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
addq %rcx, %rax
movq 8(%r10), %rcx
movq 8(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, (%r15)
adcq %r8, %rcx
movq 16(%r10), %r8
movq 16(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 8(%r15)
adcq %rax, %r8
movq 24(%r10), %rax
movq 24(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 16(%r15)
adcq %rcx, %rax
movq 32(%r10), %rcx
movq 32(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 24(%r15)
adcq %r8, %rcx
movq 40(%r10), %r8
movq 40(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 32(%r15)
adcq %rax, %r8
movq 48(%r10), %rax
movq 48(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 40(%r15)
adcq %rcx, %rax
movq 56(%r10), %rcx
movq 56(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 48(%r15)
adcq %r8, %rcx
movq 64(%r10), %r8
movq 64(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 56(%r15)
adcq %rax, %r8
movq 72(%r10), %rax
movq 72(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 64(%r15)
adcq %rcx, %rax
movq 80(%r10), %rcx
movq 80(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 72(%r15)
adcq %r8, %rcx
movq 88(%r10), %r8
movq 88(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 80(%r15)
adcq %rax, %r8
movq %r8, 88(%r15)
adcq $0x00, %r9
leaq 192(%rsp), %r11
movq %rsp, %r10
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq %r8, 184(%r10)
sbbq $0x00, %r9
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq %r8, 184(%r10)
sbbq $0x00, %r9
subq $0x60, %r15
# Add
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq %r8, 184(%r15)
adcq $0x00, %r9
movq %r9, 288(%rdi)
addq $0x60, %r15
# Add
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq %rax, 96(%r15)
# Add to zero
movq 104(%r11), %rax
adcq $0x00, %rax
movq 112(%r11), %rcx
movq %rax, 104(%r15)
adcq $0x00, %rcx
movq 120(%r11), %r8
movq %rcx, 112(%r15)
adcq $0x00, %r8
movq 128(%r11), %rax
movq %r8, 120(%r15)
adcq $0x00, %rax
movq 136(%r11), %rcx
movq %rax, 128(%r15)
adcq $0x00, %rcx
movq 144(%r11), %r8
movq %rcx, 136(%r15)
adcq $0x00, %r8
movq 152(%r11), %rax
movq %r8, 144(%r15)
adcq $0x00, %rax
movq 160(%r11), %rcx
movq %rax, 152(%r15)
adcq $0x00, %rcx
movq 168(%r11), %r8
movq %rcx, 160(%r15)
adcq $0x00, %r8
movq 176(%r11), %rax
movq %r8, 168(%r15)
adcq $0x00, %rax
movq 184(%r11), %rcx
movq %rax, 176(%r15)
adcq $0x00, %rcx
movq %rcx, 184(%r15)
addq $0x268, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mul_avx2_24,.-sp_3072_mul_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Sub b from a into a. (a -= b)
 *
 * a A single precision integer and result, 48 64-bit words.
 * b A single precision integer.
 *
 * Returns the borrow in %rax: 0 when no borrow, all ones when a < b.
 */
#ifndef __APPLE__
.text
.globl sp_3072_sub_in_place_48
.type sp_3072_sub_in_place_48,@function
.align 16
sp_3072_sub_in_place_48:
#else
.section __TEXT,__text
.globl _sp_3072_sub_in_place_48
.p2align 4
_sp_3072_sub_in_place_48:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq 128(%rdi), %rdx
movq %rcx, 120(%rdi)
sbbq 128(%rsi), %rdx
movq 136(%rdi), %rcx
movq %rdx, 128(%rdi)
sbbq 136(%rsi), %rcx
movq 144(%rdi), %rdx
movq %rcx, 136(%rdi)
sbbq 144(%rsi), %rdx
movq 152(%rdi), %rcx
movq %rdx, 144(%rdi)
sbbq 152(%rsi), %rcx
movq 160(%rdi), %rdx
movq %rcx, 152(%rdi)
sbbq 160(%rsi), %rdx
movq 168(%rdi), %rcx
movq %rdx, 160(%rdi)
sbbq 168(%rsi), %rcx
movq 176(%rdi), %rdx
movq %rcx, 168(%rdi)
sbbq 176(%rsi), %rdx
movq 184(%rdi), %rcx
movq %rdx, 176(%rdi)
sbbq 184(%rsi), %rcx
movq 192(%rdi), %rdx
movq %rcx, 184(%rdi)
sbbq 192(%rsi), %rdx
movq 200(%rdi), %rcx
movq %rdx, 192(%rdi)
sbbq 200(%rsi), %rcx
movq 208(%rdi), %rdx
movq %rcx, 200(%rdi)
sbbq 208(%rsi), %rdx
movq 216(%rdi), %rcx
movq %rdx, 208(%rdi)
sbbq 216(%rsi), %rcx
movq 224(%rdi), %rdx
movq %rcx, 216(%rdi)
sbbq 224(%rsi), %rdx
movq 232(%rdi), %rcx
movq %rdx, 224(%rdi)
sbbq 232(%rsi), %rcx
movq 240(%rdi), %rdx
movq %rcx, 232(%rdi)
sbbq 240(%rsi), %rdx
movq 248(%rdi), %rcx
movq %rdx, 240(%rdi)
sbbq 248(%rsi), %rcx
movq 256(%rdi), %rdx
movq %rcx, 248(%rdi)
sbbq 256(%rsi), %rdx
movq 264(%rdi), %rcx
movq %rdx, 256(%rdi)
sbbq 264(%rsi), %rcx
movq 272(%rdi), %rdx
movq %rcx, 264(%rdi)
sbbq 272(%rsi), %rdx
movq 280(%rdi), %rcx
movq %rdx, 272(%rdi)
sbbq 280(%rsi), %rcx
movq 288(%rdi), %rdx
movq %rcx, 280(%rdi)
sbbq 288(%rsi), %rdx
movq 296(%rdi), %rcx
movq %rdx, 288(%rdi)
sbbq 296(%rsi), %rcx
movq 304(%rdi), %rdx
movq %rcx, 296(%rdi)
sbbq 304(%rsi), %rdx
movq 312(%rdi), %rcx
movq %rdx, 304(%rdi)
sbbq 312(%rsi), %rcx
movq 320(%rdi), %rdx
movq %rcx, 312(%rdi)
sbbq 320(%rsi), %rdx
movq 328(%rdi), %rcx
movq %rdx, 320(%rdi)
sbbq 328(%rsi), %rcx
movq 336(%rdi), %rdx
movq %rcx, 328(%rdi)
sbbq 336(%rsi), %rdx
movq 344(%rdi), %rcx
movq %rdx, 336(%rdi)
sbbq 344(%rsi), %rcx
movq 352(%rdi), %rdx
movq %rcx, 344(%rdi)
sbbq 352(%rsi), %rdx
movq 360(%rdi), %rcx
movq %rdx, 352(%rdi)
sbbq 360(%rsi), %rcx
movq 368(%rdi), %rdx
movq %rcx, 360(%rdi)
sbbq 368(%rsi), %rdx
movq 376(%rdi), %rcx
movq %rdx, 368(%rdi)
sbbq 376(%rsi), %rcx
movq %rcx, 376(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_sub_in_place_48,.-sp_3072_sub_in_place_48
#endif /* __APPLE__ */
/* Add b to a into r. (r = a + b)
 *
 * r A single precision integer, 48 64-bit words.
 * a A single precision integer.
 * b A single precision integer.
 *
 * Returns the carry out of the top word (0 or 1) in %rax.
 */
#ifndef __APPLE__
.text
.globl sp_3072_add_48
.type sp_3072_add_48,@function
.align 16
sp_3072_add_48:
#else
.section __TEXT,__text
.globl _sp_3072_add_48
.p2align 4
_sp_3072_add_48:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
adcq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
adcq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
adcq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
adcq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
adcq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
adcq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
adcq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
adcq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
adcq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
adcq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
adcq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
adcq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
adcq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
adcq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
adcq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
adcq 248(%rdx), %r8
movq 256(%rsi), %rcx
movq %r8, 248(%rdi)
adcq 256(%rdx), %rcx
movq 264(%rsi), %r8
movq %rcx, 256(%rdi)
adcq 264(%rdx), %r8
movq 272(%rsi), %rcx
movq %r8, 264(%rdi)
adcq 272(%rdx), %rcx
movq 280(%rsi), %r8
movq %rcx, 272(%rdi)
adcq 280(%rdx), %r8
movq 288(%rsi), %rcx
movq %r8, 280(%rdi)
adcq 288(%rdx), %rcx
movq 296(%rsi), %r8
movq %rcx, 288(%rdi)
adcq 296(%rdx), %r8
movq 304(%rsi), %rcx
movq %r8, 296(%rdi)
adcq 304(%rdx), %rcx
movq 312(%rsi), %r8
movq %rcx, 304(%rdi)
adcq 312(%rdx), %r8
movq 320(%rsi), %rcx
movq %r8, 312(%rdi)
adcq 320(%rdx), %rcx
movq 328(%rsi), %r8
movq %rcx, 320(%rdi)
adcq 328(%rdx), %r8
movq 336(%rsi), %rcx
movq %r8, 328(%rdi)
adcq 336(%rdx), %rcx
movq 344(%rsi), %r8
movq %rcx, 336(%rdi)
adcq 344(%rdx), %r8
movq 352(%rsi), %rcx
movq %r8, 344(%rdi)
adcq 352(%rdx), %rcx
movq 360(%rsi), %r8
movq %rcx, 352(%rdi)
adcq 360(%rdx), %r8
movq 368(%rsi), %rcx
movq %r8, 360(%rdi)
adcq 368(%rdx), %rcx
movq 376(%rsi), %r8
movq %rcx, 368(%rdi)
adcq 376(%rdx), %r8
movq %r8, 376(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_add_48,.-sp_3072_add_48
#endif /* __APPLE__ */
/* Multiply a and b into r. (r = a * b)
 *
 * r A single precision integer, 96 64-bit words.
 * a A single precision integer, 48 64-bit words.
 * b A single precision integer, 48 64-bit words.
 */
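/* Same one-level Karatsuba structure as sp_3072_mul_24, doubled in width:
 * the 48-word operands are split into 24-word halves and the three partial
 * products are computed by sp_3072_mul_24.  Stack frame (0x4a8 bytes):
 * z1 at 0(%rsp), z2 at 384(%rsp), u at 768(%rsp), v at 960(%rsp), and the
 * saved r/a/b plus the carries ca/cb from 1152(%rsp) up.
 */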
#ifndef __APPLE__
.text
.globl sp_3072_mul_48
.type sp_3072_mul_48,@function
.align 16
sp_3072_mul_48:
#else
.section __TEXT,__text
.globl _sp_3072_mul_48
.p2align 4
_sp_3072_mul_48:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x4a8, %rsp
movq %rdi, 1152(%rsp)
movq %rsi, 1160(%rsp)
movq %rdx, 1168(%rsp)
leaq 768(%rsp), %r10
leaq 192(%rsi), %r12
# Add
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq 128(%rsi), %rcx
movq %rax, 120(%r10)
adcq 128(%r12), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%r10)
adcq 136(%r12), %r8
movq 144(%rsi), %rax
movq %r8, 136(%r10)
adcq 144(%r12), %rax
movq 152(%rsi), %rcx
movq %rax, 144(%r10)
adcq 152(%r12), %rcx
movq 160(%rsi), %r8
movq %rcx, 152(%r10)
adcq 160(%r12), %r8
movq 168(%rsi), %rax
movq %r8, 160(%r10)
adcq 168(%r12), %rax
movq 176(%rsi), %rcx
movq %rax, 168(%r10)
adcq 176(%r12), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%r10)
adcq 184(%r12), %r8
movq %r8, 184(%r10)
adcq $0x00, %r13
movq %r13, 1176(%rsp)
leaq 960(%rsp), %r11
leaq 192(%rdx), %r12
# Add
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq 128(%rdx), %rcx
movq %rax, 120(%r11)
adcq 128(%r12), %rcx
movq 136(%rdx), %r8
movq %rcx, 128(%r11)
adcq 136(%r12), %r8
movq 144(%rdx), %rax
movq %r8, 136(%r11)
adcq 144(%r12), %rax
movq 152(%rdx), %rcx
movq %rax, 144(%r11)
adcq 152(%r12), %rcx
movq 160(%rdx), %r8
movq %rcx, 152(%r11)
adcq 160(%r12), %r8
movq 168(%rdx), %rax
movq %r8, 160(%r11)
adcq 168(%r12), %rax
movq 176(%rdx), %rcx
movq %rax, 168(%r11)
adcq 176(%r12), %rcx
movq 184(%rdx), %r8
movq %rcx, 176(%r11)
adcq 184(%r12), %r8
movq %r8, 184(%r11)
adcq $0x00, %r14
movq %r14, 1184(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_mul_24@plt
#else
callq _sp_3072_mul_24
#endif /* __APPLE__ */
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
leaq 384(%rsp), %rdi
addq $0xc0, %rdx
addq $0xc0, %rsi
#ifndef __APPLE__
callq sp_3072_mul_24@plt
#else
callq _sp_3072_mul_24
#endif /* __APPLE__ */
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
movq 1152(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_mul_24@plt
#else
callq _sp_3072_mul_24
#endif /* __APPLE__ */
#ifdef _WIN64
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
movq 1152(%rsp), %rdi
#endif /* _WIN64 */
movq 1176(%rsp), %r13
movq 1184(%rsp), %r14
movq 1152(%rsp), %r15
movq %r13, %r9
leaq 768(%rsp), %r10
leaq 960(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x180, %r15
movq (%r10), %rax
movq (%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, (%r10)
movq %rcx, (%r11)
movq 8(%r10), %rax
movq 8(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 8(%r10)
movq %rcx, 8(%r11)
movq 16(%r10), %rax
movq 16(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 16(%r10)
movq %rcx, 16(%r11)
movq 24(%r10), %rax
movq 24(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 24(%r10)
movq %rcx, 24(%r11)
movq 32(%r10), %rax
movq 32(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 32(%r10)
movq %rcx, 32(%r11)
movq 40(%r10), %rax
movq 40(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 40(%r10)
movq %rcx, 40(%r11)
movq 48(%r10), %rax
movq 48(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 48(%r10)
movq %rcx, 48(%r11)
movq 56(%r10), %rax
movq 56(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 56(%r10)
movq %rcx, 56(%r11)
movq 64(%r10), %rax
movq 64(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 64(%r10)
movq %rcx, 64(%r11)
movq 72(%r10), %rax
movq 72(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 72(%r10)
movq %rcx, 72(%r11)
movq 80(%r10), %rax
movq 80(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 80(%r10)
movq %rcx, 80(%r11)
movq 88(%r10), %rax
movq 88(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 88(%r10)
movq %rcx, 88(%r11)
movq 96(%r10), %rax
movq 96(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 96(%r10)
movq %rcx, 96(%r11)
movq 104(%r10), %rax
movq 104(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 104(%r10)
movq %rcx, 104(%r11)
movq 112(%r10), %rax
movq 112(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 112(%r10)
movq %rcx, 112(%r11)
movq 120(%r10), %rax
movq 120(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 120(%r10)
movq %rcx, 120(%r11)
movq 128(%r10), %rax
movq 128(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 128(%r10)
movq %rcx, 128(%r11)
movq 136(%r10), %rax
movq 136(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 136(%r10)
movq %rcx, 136(%r11)
movq 144(%r10), %rax
movq 144(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 144(%r10)
movq %rcx, 144(%r11)
movq 152(%r10), %rax
movq 152(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 152(%r10)
movq %rcx, 152(%r11)
movq 160(%r10), %rax
movq 160(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 160(%r10)
movq %rcx, 160(%r11)
movq 168(%r10), %rax
movq 168(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 168(%r10)
movq %rcx, 168(%r11)
movq 176(%r10), %rax
movq 176(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 176(%r10)
movq %rcx, 176(%r11)
movq 184(%r10), %rax
movq 184(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 184(%r10)
movq %rcx, 184(%r11)
movq (%r10), %rax
addq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq %r8, 184(%r15)
adcq $0x00, %r9
leaq 384(%rsp), %r11
movq %rsp, %r10
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%r11), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%r11), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%r11), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%r11), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%r11), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%r11), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%r11), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%r11), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%r11), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%r11), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%r11), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%r11), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%r11), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%r11), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%r11), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%r11), %r8
movq %r8, 376(%r10)
sbbq $0x00, %r9
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%rdi), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%rdi), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%rdi), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%rdi), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%rdi), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%rdi), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%rdi), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%rdi), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%rdi), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%rdi), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%rdi), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%rdi), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%rdi), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%rdi), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%rdi), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%rdi), %r8
movq %r8, 376(%r10)
sbbq $0x00, %r9
subq $0xc0, %r15
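    # Recombine: add the corrected middle term into r at word offset 24.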
# Add
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r10), %r8
movq 264(%r15), %rax
movq %r8, 256(%r15)
adcq 264(%r10), %rax
movq 272(%r15), %rcx
movq %rax, 264(%r15)
adcq 272(%r10), %rcx
movq 280(%r15), %r8
movq %rcx, 272(%r15)
adcq 280(%r10), %r8
movq 288(%r15), %rax
movq %r8, 280(%r15)
adcq 288(%r10), %rax
movq 296(%r15), %rcx
movq %rax, 288(%r15)
adcq 296(%r10), %rcx
movq 304(%r15), %r8
movq %rcx, 296(%r15)
adcq 304(%r10), %r8
movq 312(%r15), %rax
movq %r8, 304(%r15)
adcq 312(%r10), %rax
movq 320(%r15), %rcx
movq %rax, 312(%r15)
adcq 320(%r10), %rcx
movq 328(%r15), %r8
movq %rcx, 320(%r15)
adcq 328(%r10), %r8
movq 336(%r15), %rax
movq %r8, 328(%r15)
adcq 336(%r10), %rax
movq 344(%r15), %rcx
movq %rax, 336(%r15)
adcq 344(%r10), %rcx
movq 352(%r15), %r8
movq %rcx, 344(%r15)
adcq 352(%r10), %r8
movq 360(%r15), %rax
movq %r8, 352(%r15)
adcq 360(%r10), %rax
movq 368(%r15), %rcx
movq %rax, 360(%r15)
adcq 368(%r10), %rcx
movq 376(%r15), %r8
movq %rcx, 368(%r15)
adcq 376(%r10), %r8
movq %r8, 376(%r15)
adcq $0x00, %r9
movq %r9, 576(%rdi)
addq $0xc0, %r15
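    # Fold in the high product at word offset 48; its top words only need
    # the carry propagated ("Add to zero" below).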
# Add
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r11), %rax
movq %rax, 192(%r15)
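    # The top words of r are still unwritten, so the remaining words of the
    # high product are stored with just the carry added in.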
# Add to zero
movq 200(%r11), %rax
adcq $0x00, %rax
movq 208(%r11), %rcx
movq %rax, 200(%r15)
adcq $0x00, %rcx
movq 216(%r11), %r8
movq %rcx, 208(%r15)
adcq $0x00, %r8
movq 224(%r11), %rax
movq %r8, 216(%r15)
adcq $0x00, %rax
movq 232(%r11), %rcx
movq %rax, 224(%r15)
adcq $0x00, %rcx
movq 240(%r11), %r8
movq %rcx, 232(%r15)
adcq $0x00, %r8
movq 248(%r11), %rax
movq %r8, 240(%r15)
adcq $0x00, %rax
movq 256(%r11), %rcx
movq %rax, 248(%r15)
adcq $0x00, %rcx
movq 264(%r11), %r8
movq %rcx, 256(%r15)
adcq $0x00, %r8
movq 272(%r11), %rax
movq %r8, 264(%r15)
adcq $0x00, %rax
movq 280(%r11), %rcx
movq %rax, 272(%r15)
adcq $0x00, %rcx
movq 288(%r11), %r8
movq %rcx, 280(%r15)
adcq $0x00, %r8
movq 296(%r11), %rax
movq %r8, 288(%r15)
adcq $0x00, %rax
movq 304(%r11), %rcx
movq %rax, 296(%r15)
adcq $0x00, %rcx
movq 312(%r11), %r8
movq %rcx, 304(%r15)
adcq $0x00, %r8
movq 320(%r11), %rax
movq %r8, 312(%r15)
adcq $0x00, %rax
movq 328(%r11), %rcx
movq %rax, 320(%r15)
adcq $0x00, %rcx
movq 336(%r11), %r8
movq %rcx, 328(%r15)
adcq $0x00, %r8
movq 344(%r11), %rax
movq %r8, 336(%r15)
adcq $0x00, %rax
movq 352(%r11), %rcx
movq %rax, 344(%r15)
adcq $0x00, %rcx
movq 360(%r11), %r8
movq %rcx, 352(%r15)
adcq $0x00, %r8
movq 368(%r11), %rax
movq %r8, 360(%r15)
adcq $0x00, %rax
movq 376(%r11), %rcx
movq %rax, 368(%r15)
adcq $0x00, %rcx
movq %rcx, 376(%r15)
addq $0x4a8, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mul_48,.-sp_3072_mul_48
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
 *
 * r A single precision integer: the 96-word product.
 * a A single precision integer of 48 64-bit words.
 * b A single precision integer of 48 64-bit words.
 */
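/* One level of Karatsuba with 24-word halves (a = a1*2^(64*24) + a0):
 *
 *   z0 = a0 * b0
 *   z2 = a1 * b1
 *   z1 = (a0 + a1) * (b0 + b1) - z0 - z2
 *   r  = z2 * 2^(64*48) + z1 * 2^(64*24) + z0
 *
 * The three 24-word products are produced by sp_3072_mul_avx2_24 and
 * recombined below; the carries out of the half-sums are folded in
 * with a branch-free masked addition.
 */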
#ifndef __APPLE__
.text
.globl sp_3072_mul_avx2_48
.type sp_3072_mul_avx2_48,@function
.align 16
sp_3072_mul_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_mul_avx2_48
.p2align 4
_sp_3072_mul_avx2_48:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x4a8, %rsp
movq %rdi, 1152(%rsp)
movq %rsi, 1160(%rsp)
movq %rdx, 1168(%rsp)
leaq 768(%rsp), %r10
leaq 192(%rsi), %r12
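    # a0 + a1: sum the 24-word halves of a into the buffer at 768(%rsp);
    # the carry out is collected in %r13.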
# Add
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq 128(%rsi), %rcx
movq %rax, 120(%r10)
adcq 128(%r12), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%r10)
adcq 136(%r12), %r8
movq 144(%rsi), %rax
movq %r8, 136(%r10)
adcq 144(%r12), %rax
movq 152(%rsi), %rcx
movq %rax, 144(%r10)
adcq 152(%r12), %rcx
movq 160(%rsi), %r8
movq %rcx, 152(%r10)
adcq 160(%r12), %r8
movq 168(%rsi), %rax
movq %r8, 160(%r10)
adcq 168(%r12), %rax
movq 176(%rsi), %rcx
movq %rax, 168(%r10)
adcq 176(%r12), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%r10)
adcq 184(%r12), %r8
movq %r8, 184(%r10)
adcq $0x00, %r13
movq %r13, 1176(%rsp)
leaq 960(%rsp), %r11
leaq 192(%rdx), %r12
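    # b0 + b1: sum the 24-word halves of b into 960(%rsp); carry in %r14.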
# Add
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq 128(%rdx), %rcx
movq %rax, 120(%r11)
adcq 128(%r12), %rcx
movq 136(%rdx), %r8
movq %rcx, 128(%r11)
adcq 136(%r12), %r8
movq 144(%rdx), %rax
movq %r8, 136(%r11)
adcq 144(%r12), %rax
movq 152(%rdx), %rcx
movq %rax, 144(%r11)
adcq 152(%r12), %rcx
movq 160(%rdx), %r8
movq %rcx, 152(%r11)
adcq 160(%r12), %r8
movq 168(%rdx), %rax
movq %r8, 160(%r11)
adcq 168(%r12), %rax
movq 176(%rdx), %rcx
movq %rax, 168(%r11)
adcq 176(%r12), %rcx
movq 184(%rdx), %r8
movq %rcx, 176(%r11)
adcq 184(%r12), %r8
movq %r8, 184(%r11)
adcq $0x00, %r14
movq %r14, 1184(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_mul_avx2_24@plt
#else
callq _sp_3072_mul_avx2_24
#endif /* __APPLE__ */
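    # z1 = (a0 + a1) * (b0 + b1) now sits at (%rsp); next, z2 = a1 * b1.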
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
leaq 384(%rsp), %rdi
addq $0xc0, %rdx
addq $0xc0, %rsi
#ifndef __APPLE__
callq sp_3072_mul_avx2_24@plt
#else
callq _sp_3072_mul_avx2_24
#endif /* __APPLE__ */
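    # z2 = a1 * b1 is at 384(%rsp); last, z0 = a0 * b0 straight into r.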
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
movq 1152(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_mul_avx2_24@plt
#else
callq _sp_3072_mul_avx2_24
#endif /* __APPLE__ */
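    # z0 = a0 * b0 occupies the low half of r.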
#ifdef _WIN64
movq 1168(%rsp), %rdx
movq 1160(%rsp), %rsi
movq 1152(%rsp), %rdi
#endif /* _WIN64 */
movq 1176(%rsp), %r13
movq 1184(%rsp), %r14
movq 1152(%rsp), %r15
movq %r13, %r9
leaq 768(%rsp), %r10
leaq 960(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x180, %r15
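    # Branch-free carry fix-up: negq turns the half-sum carries in
    # %r13/%r14 into 0 or all-ones masks. pextq with an all-ones mask
    # copies the word and with a zero mask yields 0, so the chain below
    # adds carry_b ? (a0 + a1) : 0 and carry_a ? (b0 + b1) : 0 into r at
    # word offset 48, with %r9 = carry_a & carry_b entering at word 72.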
movq (%r10), %rax
movq (%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
addq %rcx, %rax
movq 8(%r10), %rcx
movq 8(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, (%r15)
adcq %r8, %rcx
movq 16(%r10), %r8
movq 16(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 8(%r15)
adcq %rax, %r8
movq 24(%r10), %rax
movq 24(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 16(%r15)
adcq %rcx, %rax
movq 32(%r10), %rcx
movq 32(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 24(%r15)
adcq %r8, %rcx
movq 40(%r10), %r8
movq 40(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 32(%r15)
adcq %rax, %r8
movq 48(%r10), %rax
movq 48(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 40(%r15)
adcq %rcx, %rax
movq 56(%r10), %rcx
movq 56(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 48(%r15)
adcq %r8, %rcx
movq 64(%r10), %r8
movq 64(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 56(%r15)
adcq %rax, %r8
movq 72(%r10), %rax
movq 72(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 64(%r15)
adcq %rcx, %rax
movq 80(%r10), %rcx
movq 80(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 72(%r15)
adcq %r8, %rcx
movq 88(%r10), %r8
movq 88(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 80(%r15)
adcq %rax, %r8
movq 96(%r10), %rax
movq 96(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 88(%r15)
adcq %rcx, %rax
movq 104(%r10), %rcx
movq 104(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 96(%r15)
adcq %r8, %rcx
movq 112(%r10), %r8
movq 112(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 104(%r15)
adcq %rax, %r8
movq 120(%r10), %rax
movq 120(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 112(%r15)
adcq %rcx, %rax
movq 128(%r10), %rcx
movq 128(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 120(%r15)
adcq %r8, %rcx
movq 136(%r10), %r8
movq 136(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 128(%r15)
adcq %rax, %r8
movq 144(%r10), %rax
movq 144(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 136(%r15)
adcq %rcx, %rax
movq 152(%r10), %rcx
movq 152(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 144(%r15)
adcq %r8, %rcx
movq 160(%r10), %r8
movq 160(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 152(%r15)
adcq %rax, %r8
movq 168(%r10), %rax
movq 168(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 160(%r15)
adcq %rcx, %rax
movq 176(%r10), %rcx
movq 176(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 168(%r15)
adcq %r8, %rcx
movq 184(%r10), %r8
movq 184(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 176(%r15)
adcq %rax, %r8
movq %r8, 184(%r15)
adcq $0x00, %r9
leaq 384(%rsp), %r11
movq %rsp, %r10
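    # z1 -= z2; the borrow joins the running carry in %r9.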
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%r11), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%r11), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%r11), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%r11), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%r11), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%r11), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%r11), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%r11), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%r11), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%r11), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%r11), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%r11), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%r11), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%r11), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%r11), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%r11), %r8
movq %r8, 376(%r10)
sbbq $0x00, %r9
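    # z1 -= z0 (z0 is the low-half product already stored at r).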
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%rdi), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%rdi), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%rdi), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%rdi), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%rdi), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%rdi), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%rdi), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%rdi), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%rdi), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%rdi), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%rdi), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%rdi), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%rdi), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%rdi), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%rdi), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%rdi), %r8
movq %r8, 376(%r10)
sbbq $0x00, %r9
subq $0xc0, %r15
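    # Recombine: r += z1 * 2^(64*24), adding the corrected middle term at
    # word offset 24.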
# Add
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r10), %r8
movq 264(%r15), %rax
movq %r8, 256(%r15)
adcq 264(%r10), %rax
movq 272(%r15), %rcx
movq %rax, 264(%r15)
adcq 272(%r10), %rcx
movq 280(%r15), %r8
movq %rcx, 272(%r15)
adcq 280(%r10), %r8
movq 288(%r15), %rax
movq %r8, 280(%r15)
adcq 288(%r10), %rax
movq 296(%r15), %rcx
movq %rax, 288(%r15)
adcq 296(%r10), %rcx
movq 304(%r15), %r8
movq %rcx, 296(%r15)
adcq 304(%r10), %r8
movq 312(%r15), %rax
movq %r8, 304(%r15)
adcq 312(%r10), %rax
movq 320(%r15), %rcx
movq %rax, 312(%r15)
adcq 320(%r10), %rcx
movq 328(%r15), %r8
movq %rcx, 320(%r15)
adcq 328(%r10), %r8
movq 336(%r15), %rax
movq %r8, 328(%r15)
adcq 336(%r10), %rax
movq 344(%r15), %rcx
movq %rax, 336(%r15)
adcq 344(%r10), %rcx
movq 352(%r15), %r8
movq %rcx, 344(%r15)
adcq 352(%r10), %r8
movq 360(%r15), %rax
movq %r8, 352(%r15)
adcq 360(%r10), %rax
movq 368(%r15), %rcx
movq %rax, 360(%r15)
adcq 368(%r10), %rcx
movq 376(%r15), %r8
movq %rcx, 368(%r15)
adcq 376(%r10), %r8
movq %r8, 376(%r15)
adcq $0x00, %r9
movq %r9, 576(%rdi)
addq $0xc0, %r15
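    # Fold in z2: its low words are added into r at word offset 48 and the
    # rest is carried into the untouched top words ("Add to zero" below).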
# Add
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r11), %rax
movq %rax, 192(%r15)
# Add to zero
movq 200(%r11), %rax
adcq $0x00, %rax
movq 208(%r11), %rcx
movq %rax, 200(%r15)
adcq $0x00, %rcx
movq 216(%r11), %r8
movq %rcx, 208(%r15)
adcq $0x00, %r8
movq 224(%r11), %rax
movq %r8, 216(%r15)
adcq $0x00, %rax
movq 232(%r11), %rcx
movq %rax, 224(%r15)
adcq $0x00, %rcx
movq 240(%r11), %r8
movq %rcx, 232(%r15)
adcq $0x00, %r8
movq 248(%r11), %rax
movq %r8, 240(%r15)
adcq $0x00, %rax
movq 256(%r11), %rcx
movq %rax, 248(%r15)
adcq $0x00, %rcx
movq 264(%r11), %r8
movq %rcx, 256(%r15)
adcq $0x00, %r8
movq 272(%r11), %rax
movq %r8, 264(%r15)
adcq $0x00, %rax
movq 280(%r11), %rcx
movq %rax, 272(%r15)
adcq $0x00, %rcx
movq 288(%r11), %r8
movq %rcx, 280(%r15)
adcq $0x00, %r8
movq 296(%r11), %rax
movq %r8, 288(%r15)
adcq $0x00, %rax
movq 304(%r11), %rcx
movq %rax, 296(%r15)
adcq $0x00, %rcx
movq 312(%r11), %r8
movq %rcx, 304(%r15)
adcq $0x00, %r8
movq 320(%r11), %rax
movq %r8, 312(%r15)
adcq $0x00, %rax
movq 328(%r11), %rcx
movq %rax, 320(%r15)
adcq $0x00, %rcx
movq 336(%r11), %r8
movq %rcx, 328(%r15)
adcq $0x00, %r8
movq 344(%r11), %rax
movq %r8, 336(%r15)
adcq $0x00, %rax
movq 352(%r11), %rcx
movq %rax, 344(%r15)
adcq $0x00, %rcx
movq 360(%r11), %r8
movq %rcx, 352(%r15)
adcq $0x00, %r8
movq 368(%r11), %rax
movq %r8, 360(%r15)
adcq $0x00, %rax
movq 376(%r11), %rcx
movq %rax, 368(%r15)
adcq $0x00, %rcx
movq %rcx, 376(%r15)
addq $0x4a8, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mul_avx2_48,.-sp_3072_mul_avx2_48
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
 *
 * r A single precision integer: the 24-word square.
 * a A single precision integer of 12 64-bit words.
 */
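/* Product scanning: column k of the result sums A[i] * A[j] over
 * i + j == k. Off-diagonal products are counted twice and the squares
 * A[i] * A[i] once, e.g. column 2 = 2*A[0]*A[2] + A[1]^2.
 */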
#ifndef __APPLE__
.text
.globl sp_3072_sqr_12
.type sp_3072_sqr_12,@function
.align 16
sp_3072_sqr_12:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_12
.p2align 4
_sp_3072_sqr_12:
#endif /* __APPLE__ */
pushq %r12
subq $0x60, %rsp
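    # The low half of the result is accumulated on the stack and copied to
    # r at the end, so the squaring stays correct when r overlaps a.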
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rsp)
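    # From here each column's off-diagonal products are summed once into
    # %r10:%r11:%r12 and doubled with a single add-to-self step instead of
    # being added twice.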
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 40(%rsp)
# A[0] * A[6]
movq 48(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 48(%rsp)
# A[0] * A[7]
movq 56(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[6]
movq 48(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 56(%rsp)
# A[0] * A[8]
movq 64(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[7]
movq 56(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[6]
movq 48(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 64(%rsp)
# A[0] * A[9]
movq 72(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[8]
movq 64(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[7]
movq 56(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[6]
movq 48(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 72(%rsp)
# A[0] * A[10]
movq 80(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[9]
movq 72(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[8]
movq 64(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[7]
movq 56(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[6]
movq 48(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 80(%rsp)
# A[0] * A[11]
movq 88(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[10]
movq 80(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[9]
movq 72(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[8]
movq 64(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[7]
movq 56(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[6]
movq 48(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 88(%rsp)
# A[1] * A[11]
movq 88(%rsi), %rax
mulq 8(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[2] * A[10]
movq 80(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[9]
movq 72(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[8]
movq 64(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[7]
movq 56(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[6]
movq 48(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 96(%rdi)
# A[2] * A[11]
movq 88(%rsi), %rax
mulq 16(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[3] * A[10]
movq 80(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[9]
movq 72(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[8]
movq 64(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[7]
movq 56(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 104(%rdi)
# A[3] * A[11]
movq 88(%rsi), %rax
mulq 24(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[4] * A[10]
movq 80(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[9]
movq 72(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[8]
movq 64(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[7]
movq 56(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 112(%rdi)
# A[4] * A[11]
movq 88(%rsi), %rax
mulq 32(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[5] * A[10]
movq 80(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[9]
movq 72(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[8]
movq 64(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 120(%rdi)
# A[5] * A[11]
movq 88(%rsi), %rax
mulq 40(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[6] * A[10]
movq 80(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[9]
movq 72(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[8]
movq 64(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 128(%rdi)
# A[6] * A[11]
movq 88(%rsi), %rax
mulq 48(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[7] * A[10]
movq 80(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[9]
movq 72(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 136(%rdi)
# A[7] * A[11]
movq 88(%rsi), %rax
mulq 56(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * A[10]
movq 80(%rsi), %rax
mulq 64(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * A[9]
movq 72(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 144(%rdi)
# A[8] * A[11]
movq 88(%rsi), %rax
mulq 64(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[9] * A[10]
movq 80(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 152(%rdi)
# A[9] * A[11]
movq 88(%rsi), %rax
mulq 72(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[10] * A[10]
movq 80(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 160(%rdi)
# A[10] * A[11]
movq 88(%rsi), %rax
mulq 80(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 168(%rdi)
# A[11] * A[11]
movq 88(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
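    # Copy the 12 staged low words from the stack to r (the high words
    # were written to r directly).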
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r10
movq 56(%rsp), %r11
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r10
movq 88(%rsp), %r11
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r10, 80(%rdi)
movq %r11, 88(%rdi)
addq $0x60, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_12,.-sp_3072_sqr_12
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
 *
 * r A single precision integer: the 24-word square.
 * a A single precision integer of 12 64-bit words.
 */
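/* The A[i] * A[j] products are walked in diagonals, each using mulxq
 * for the multiply and the independent adcxq/adoxq carry chains to
 * accumulate without flag conflicts; the off-diagonal total is then
 * doubled and the A[i]^2 terms added in a final pass.
 */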
#ifndef __APPLE__
.text
.globl sp_3072_sqr_avx2_12
.type sp_3072_sqr_avx2_12,@function
.align 16
sp_3072_sqr_avx2_12:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_avx2_12
.p2align 4
_sp_3072_sqr_avx2_12:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $0x60, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbp
cmovne %rdi, %rbp
addq $0x60, %rdi
xorq %r10, %r10
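    # If r aliases a, the low half is staged in the stack buffer
    # (%rbp == %rsp); otherwise %rbp points at r and it is written in
    # place. %rdi now addresses the high half. %r10 stays zero throughout,
    # and the xor also clears CF/OF for the adcx/adox chains.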
# Diagonal 1
# Zero into %r9
# A[1] x A[0]
movq (%rsi), %rdx
mulxq 8(%rsi), %r8, %r9
movq %r8, 8(%rbp)
# Zero into %r8
# A[2] x A[0]
mulxq 16(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rbp)
# Zero into %r9
# A[3] x A[0]
mulxq 24(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 24(%rbp)
# Zero into %r8
# A[4] x A[0]
mulxq 32(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 32(%rbp)
# Zero into %r9
# A[5] x A[0]
mulxq 40(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 40(%rbp)
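    # The "No load"/"No store" notes below mark result words kept live in
    # %r12-%r15/%rbx instead of round-tripping through the memory slot
    # named after the dash.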
# No load %r12 - %r8
# A[6] x A[0]
mulxq 48(%rsi), %rax, %r12
adcxq %rax, %r9
adoxq %r10, %r12
movq %r9, 48(%rbp)
# No load %r13 - %r9
# A[7] x A[0]
mulxq 56(%rsi), %rax, %r13
adcxq %rax, %r12
adoxq %r10, %r13
# No store %r12 - %r8
# No load %r14 - %r8
# A[8] x A[0]
mulxq 64(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %r10, %r14
# No store %r13 - %r9
# No load %r15 - %r9
# A[9] x A[0]
mulxq 72(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %r10, %r15
# No store %r14 - %r8
# No load %rbx - %r8
# A[10] x A[0]
mulxq 80(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %r10, %rbx
# No store %r15 - %r9
# Zero into %r9
# A[11] x A[0]
mulxq 88(%rsi), %rax, %r9
adcxq %rax, %rbx
adoxq %r10, %r9
# No store %rbx - %r8
# Carry
adcxq %r10, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, (%rdi)
# Diagonal 2
movq 24(%rbp), %r9
movq 32(%rbp), %r8
# A[2] x A[1]
movq 8(%rsi), %rdx
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 24(%rbp)
movq 40(%rbp), %r9
# A[3] x A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rbp)
movq 48(%rbp), %r8
# A[4] x A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 40(%rbp)
# No load %r12 - %r9
# A[5] x A[1]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r12
movq %r8, 48(%rbp)
# No load %r13 - %r8
# A[6] x A[1]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# No store %r12 - %r9
# No load %r14 - %r9
# A[7] x A[1]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[8] x A[1]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[9] x A[1]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r8
movq (%rdi), %r8
# A[10] x A[1]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# No store %rbx - %r9
# Zero into %r9
# A[11] x A[1]
mulxq 88(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, (%rdi)
# Zero into %r8
# A[11] x A[2]
movq 16(%rsi), %rdx
mulxq 88(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 8(%rdi)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 16(%rdi)
# Diagonal 3
movq 40(%rbp), %r8
movq 48(%rbp), %r9
# A[3] x A[2]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 40(%rbp)
# No load %r12 - %r8
# A[4] x A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r12
movq %r9, 48(%rbp)
# No load %r13 - %r9
# A[5] x A[2]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# No store %r12 - %r8
# No load %r14 - %r8
# A[6] x A[2]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r9
# No load %r15 - %r9
# A[7] x A[2]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r8
# No load %rbx - %r8
# A[8] x A[2]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rdi), %r9
# A[9] x A[2]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rdi), %r8
# A[10] x A[2]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rdi)
movq 16(%rdi), %r9
# A[10] x A[3]
movq 24(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 8(%rdi)
# Zero into %r8
# A[10] x A[4]
movq 32(%rsi), %rdx
mulxq 80(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rdi)
# Zero into %r9
# A[10] x A[5]
movq 40(%rsi), %rdx
mulxq 80(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 24(%rdi)
# Carry
adcxq %r11, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 32(%rdi)
# Diagonal 4
# No load %r13 - %r8
# A[4] x A[3]
movq 24(%rsi), %rdx
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# No store %r12 - %r9
# No load %r14 - %r9
# A[5] x A[3]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[6] x A[3]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[7] x A[3]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r8
movq (%rdi), %r8
# A[8] x A[3]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# No store %rbx - %r9
movq 8(%rdi), %r9
# A[9] x A[3]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r8
# A[9] x A[4]
movq 32(%rsi), %rdx
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq 24(%rdi), %r9
# A[9] x A[5]
movq 40(%rsi), %rdx
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 16(%rdi)
movq 32(%rdi), %r8
# A[9] x A[6]
movq 48(%rsi), %rdx
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 24(%rdi)
# Zero into %r9
# A[9] x A[7]
movq 56(%rsi), %rdx
mulxq 72(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 32(%rdi)
# Zero into %r8
# A[9] x A[8]
movq 64(%rsi), %rdx
mulxq 72(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 40(%rdi)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 48(%rdi)
# Diagonal 5
# No load %r15 - %r9
# A[5] x A[4]
movq 32(%rsi), %rdx
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r8
# No load %rbx - %r8
# A[6] x A[4]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rdi), %r9
# A[7] x A[4]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rdi), %r8
# A[8] x A[4]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rdi)
movq 16(%rdi), %r9
# A[8] x A[5]
movq 40(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 8(%rdi)
movq 24(%rdi), %r8
# A[8] x A[6]
movq 48(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 16(%rdi)
movq 32(%rdi), %r9
# A[8] x A[7]
movq 56(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r8
# A[10] x A[6]
movq 48(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 32(%rdi)
movq 48(%rdi), %r9
# A[10] x A[7]
movq 56(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 40(%rdi)
# Zero into %r8
# A[10] x A[8]
movq 64(%rsi), %rdx
mulxq 80(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 48(%rdi)
# Zero into %r9
# A[10] x A[9]
movq 72(%rsi), %rdx
mulxq 80(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 56(%rdi)
# Carry
adcxq %r11, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 64(%rdi)
# Diagonal 6
movq (%rdi), %r8
# A[6] x A[5]
movq 40(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# No store %rbx - %r9
movq 8(%rdi), %r9
# A[7] x A[5]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r8
# A[7] x A[6]
movq 48(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq 24(%rdi), %r9
# A[11] x A[3]
movq 24(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 16(%rdi)
movq 32(%rdi), %r8
# A[11] x A[4]
movq 32(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 24(%rdi)
movq 40(%rdi), %r9
# A[11] x A[5]
movq 40(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rdi)
movq 48(%rdi), %r8
# A[11] x A[6]
movq 48(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 40(%rdi)
movq 56(%rdi), %r9
# A[11] x A[7]
movq 56(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 48(%rdi)
movq 64(%rdi), %r8
# A[11] x A[8]
movq 64(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 56(%rdi)
# Zero into %r9
# A[11] x A[9]
movq 72(%rsi), %rdx
mulxq 88(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 64(%rdi)
# Zero into %r8
# A[11] x A[10]
movq 80(%rsi), %rdx
mulxq 88(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 72(%rdi)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 80(%rdi)
movq %r11, 88(%rdi)
# Double and Add in A[i] x A[i]
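    # The off-diagonal words are doubled through the OF chain (adoxq of a
    # register with itself) while the squares A[i]^2 enter through the CF
    # chain (adcxq): r = 2*sum(A[i]*A[j], i < j) + sum(A[i]^2).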
movq 8(%rbp), %r9
# A[0] x A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rbp)
adoxq %r9, %r9
adcxq %rcx, %r9
movq %r9, 8(%rbp)
movq 16(%rbp), %r8
movq 24(%rbp), %r9
# A[1] x A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rbp)
movq %r9, 24(%rbp)
movq 32(%rbp), %r8
movq 40(%rbp), %r9
# A[2] x A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rbp)
movq %r9, 40(%rbp)
movq 48(%rbp), %r8
# A[3] x A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r12, %r12
adcxq %rax, %r8
adcxq %rcx, %r12
movq %r8, 48(%rbp)
# A[4] x A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r13, %r13
adoxq %r14, %r14
adcxq %rax, %r13
adcxq %rcx, %r14
# A[5] x A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r15, %r15
adoxq %rbx, %rbx
adcxq %rax, %r15
adcxq %rcx, %rbx
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[6] x A[6]
movq 48(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[7] x A[7]
movq 56(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[8] x A[8]
movq 64(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[9] x A[9]
movq 72(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdi), %r8
movq 72(%rdi), %r9
# A[10] x A[10]
movq 80(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq 80(%rdi), %r8
movq 88(%rdi), %r9
# A[11] x A[11]
movq 88(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq %r12, -40(%rdi)
movq %r13, -32(%rdi)
movq %r14, -24(%rdi)
movq %r15, -16(%rdi)
movq %rbx, -8(%rdi)
subq $0x60, %rdi
cmpq %rdi, %rsi
jne L_end_3072_sqr_avx2_12
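    # r aliased a: copy the seven low words staged on the stack back to r
    # (words 7..11 were stored from registers above).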
vmovdqu (%rbp), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbp), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbp), %xmm0
vmovups %xmm0, 32(%rdi)
movq 48(%rbp), %rax
movq %rax, 48(%rdi)
L_end_3072_sqr_avx2_12:
addq $0x60, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_avx2_12,.-sp_3072_sqr_avx2_12
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
 *
 * Karatsuba: ah^2, al^2, (al - ah)^2
 *
 * r A single precision integer: the 48-word square.
 * a A single precision integer of 24 64-bit words.
 */
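/* With 12-word halves and a = ah*2^(64*12) + al:
 *
 *   a^2 = ah^2 * 2^(64*24) + 2*al*ah * 2^(64*12) + al^2
 *   2*al*ah = al^2 + ah^2 - (al - ah)^2
 *
 * so three 12-word squarings suffice, and only |al - ah| is needed
 * since the difference is squared.
 */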
#ifndef __APPLE__
.text
.globl sp_3072_sqr_24
.type sp_3072_sqr_24,@function
.align 16
sp_3072_sqr_24:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_24
.p2align 4
_sp_3072_sqr_24:
#endif /* __APPLE__ */
subq $0xd0, %rsp
movq %rdi, 192(%rsp)
movq %rsi, 200(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 96(%rsi), %r9
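    # al - ah: subtract the high 12 words of a from the low 12 into the
    # stack buffer; the borrow lands in %rcx.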
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq %rax, 88(%r8)
sbbq $0x00, %rcx
# Cond Negate
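    # %rcx is 0 or -1 from the borrow: XOR with it and add the borrow back
    # to conditionally negate, leaving |al - ah| in the buffer.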
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 88(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_12@plt
#else
callq _sp_3072_sqr_12
#endif /* __APPLE__ */
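    # (al - ah)^2 has been squared in place into the 24 words at (%rsp).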
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
addq $0x60, %rsi
addq $0xc0, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_12@plt
#else
callq _sp_3072_sqr_12
#endif /* __APPLE__ */
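    # ah^2 into r at word offset 24.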
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_sqr_12@plt
#else
callq _sp_3072_sqr_12
#endif /* __APPLE__ */
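    # al^2 into the low 24 words of r.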
#ifdef _WIN64
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
#endif /* _WIN64 */
movq 192(%rsp), %rsi
leaq 96(%rsp), %r8
addq $0x120, %rsi
movq $0x00, %rcx
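    # (al - ah)^2 - ah^2 - al^2 = -(2*al*ah): subtract ah^2 (at r + 0xc0)
    # and then al^2 (at r) from the staged square; %rcx counts the borrows.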
movq -96(%r8), %rax
subq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq %rdx, 88(%r8)
sbbq $0x00, %rcx
subq $0xc0, %rsi
movq -96(%r8), %rax
subq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq %rdx, 88(%r8)
sbbq $0x00, %rcx
movq 192(%rsp), %rdi
negq %rcx
addq $0xc0, %rdi
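    # The buffer now holds the negated middle term; subtracting it from r
    # at word offset 12 adds 2*al*ah * 2^(64*12), and %rcx carries the
    # correction folded into the top words below ("Add in word").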
movq -96(%rdi), %rax
subq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq %rdx, 88(%rdi)
sbbq $0x00, %rcx
movq 192(%rsp), %rdi
addq $0x120, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq %rdx, 88(%rdi)
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
addq $0xd0, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_24,.-sp_3072_sqr_24
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* Karatsuba: ah^2, al^2, (al - ah)^2
*
* r A single precision integer.
* a A single precision integer.
*/
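/* The combine steps below use the identity (with B = 2^768, the 12-digit
 * half base):
 *
 *   a = ah*B + al
 *   a^2 = ah^2*B^2 + (al^2 + ah^2 - (al - ah)^2)*B + al^2
 *
 * since 2*al*ah = al^2 + ah^2 - (al - ah)^2. The conditional negate keeps
 * the squared middle operand as |al - ah|, so three half-size squarings
 * plus word-wise add/subtract chains suffice.
 */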
#ifndef __APPLE__
.text
.globl sp_3072_sqr_avx2_24
.type sp_3072_sqr_avx2_24,@function
.align 16
sp_3072_sqr_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_avx2_24
.p2align 4
_sp_3072_sqr_avx2_24:
#endif /* __APPLE__ */
subq $0xd0, %rsp
movq %rdi, 192(%rsp)
movq %rsi, 200(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 96(%rsi), %r9
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq %rax, 88(%r8)
sbbq $0x00, %rcx
# Cond Negate: two's complement when the borrow mask %rcx is -1, leaving |al - ah|
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 88(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_12@plt
#else
callq _sp_3072_sqr_avx2_12
#endif /* __APPLE__ */
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
addq $0x60, %rsi
addq $0xc0, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_12@plt
#else
callq _sp_3072_sqr_avx2_12
#endif /* __APPLE__ */
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_12@plt
#else
callq _sp_3072_sqr_avx2_12
#endif /* __APPLE__ */
#ifdef _WIN64
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
#endif /* _WIN64 */
movq 192(%rsp), %rsi
leaq 96(%rsp), %r8
addq $0x120, %rsi
movq $0x00, %rcx
movq -96(%r8), %rax
subq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq %rdx, 88(%r8)
sbbq $0x00, %rcx
subq $0xc0, %rsi
movq -96(%r8), %rax
subq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq %rdx, 88(%r8)
sbbq $0x00, %rcx
movq 192(%rsp), %rdi
negq %rcx
addq $0xc0, %rdi
movq -96(%rdi), %rax
subq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq %rdx, 88(%rdi)
sbbq $0x00, %rcx
movq 192(%rsp), %rdi
addq $0x120, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq %rdx, 88(%rdi)
movq 200(%rsp), %rsi
movq 192(%rsp), %rdi
addq $0xd0, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_avx2_24,.-sp_3072_sqr_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* Karatsuba: ah^2, al^2, (al - ah)^2
*
* r A single precision integer.
* a A single precision integer.
*/
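/* A plain-C sketch of the Karatsuba squaring schedule used here (H = 24;
 * the 24-digit routines above use the same schedule with H = 12). It is a
 * reading aid, not part of this build, and it is mathematically equivalent
 * rather than instruction-for-instruction: sqr_half() is a hypothetical
 * stand-in for the half-size call (sp_3072_sqr_24), and the assembly folds
 * the middle-term arithmetic into in-place carry/borrow chains instead of
 * using the scratch buffers shown.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   #define H 24                            // half size in 64-bit digits
 *
 *   void sqr_half(uint64_t r[2 * H], const uint64_t a[H]);
 *
 *   // x += y over n digits, returning the carry out (0 or 1)
 *   static uint64_t add_n(uint64_t *x, const uint64_t *y, int n) {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < n; i++) {
 *           t += (unsigned __int128)x[i] + y[i];
 *           x[i] = (uint64_t)t;
 *           t >>= 64;
 *       }
 *       return (uint64_t)t;
 *   }
 *
 *   // x -= y over n digits, returning the borrow out (0 or 1)
 *   static uint64_t sub_n(uint64_t *x, const uint64_t *y, int n) {
 *       uint64_t b = 0;
 *       for (int i = 0; i < n; i++) {
 *           unsigned __int128 t = (unsigned __int128)x[i] - y[i] - b;
 *           x[i] = (uint64_t)t;
 *           b = (uint64_t)(t >> 64) & 1;
 *       }
 *       return b;
 *   }
 *
 *   void sqr_ref(uint64_t r[4 * H], const uint64_t a[2 * H]) {
 *       uint64_t d[H], z1[2 * H], mid[2 * H];
 *       uint64_t b = 0;
 *       for (int i = 0; i < H; i++) {       // d = al - ah, tracking borrow
 *           unsigned __int128 t = (unsigned __int128)a[i] - a[H + i] - b;
 *           d[i] = (uint64_t)t;
 *           b = (uint64_t)(t >> 64) & 1;
 *       }
 *       if (b) {                            // negate so d = |al - ah|;
 *           uint64_t c = 1;                 // the asm does this branch-free
 *           for (int i = 0; i < H; i++) {
 *               unsigned __int128 t = (unsigned __int128)(uint64_t)~d[i] + c;
 *               d[i] = (uint64_t)t;
 *               c = (uint64_t)(t >> 64);
 *           }
 *       }
 *       sqr_half(z1, d);                    // z1 = (al - ah)^2
 *       sqr_half(r + 2 * H, a + H);         // z2 = ah^2 -> r[2H..4H-1]
 *       sqr_half(r, a);                     // z0 = al^2 -> r[0..2H-1]
 *       memcpy(mid, r, sizeof(mid));        // mid = z0
 *       uint64_t c = add_n(mid, r + 2 * H, 2 * H);   // mid += z2
 *       c -= sub_n(mid, z1, 2 * H);         // mid -= z1; never underflows,
 *                                           // since z0 + z2 >= (al - ah)^2
 *       c += add_n(r + H, mid, 2 * H);      // r += mid << (64 * H)
 *       for (int i = 3 * H; c != 0 && i < 4 * H; i++) {
 *           unsigned __int128 t = (unsigned __int128)r[i] + c;
 *           r[i] = (uint64_t)t;             // propagate the top carry
 *           c = (uint64_t)(t >> 64);
 *       }
 *   }
 */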
#ifndef __APPLE__
.text
.globl sp_3072_sqr_48
.type sp_3072_sqr_48,@function
.align 16
sp_3072_sqr_48:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_48
.p2align 4
_sp_3072_sqr_48:
#endif /* __APPLE__ */
subq $0x190, %rsp
movq %rdi, 384(%rsp)
movq %rsi, 392(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 192(%rsi), %r9
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq 128(%rsi), %rdx
movq %rax, 120(%r8)
sbbq 128(%r9), %rdx
movq 136(%rsi), %rax
movq %rdx, 128(%r8)
sbbq 136(%r9), %rax
movq 144(%rsi), %rdx
movq %rax, 136(%r8)
sbbq 144(%r9), %rdx
movq 152(%rsi), %rax
movq %rdx, 144(%r8)
sbbq 152(%r9), %rax
movq 160(%rsi), %rdx
movq %rax, 152(%r8)
sbbq 160(%r9), %rdx
movq 168(%rsi), %rax
movq %rdx, 160(%r8)
sbbq 168(%r9), %rax
movq 176(%rsi), %rdx
movq %rax, 168(%r8)
sbbq 176(%r9), %rdx
movq 184(%rsi), %rax
movq %rdx, 176(%r8)
sbbq 184(%r9), %rax
movq %rax, 184(%r8)
sbbq $0x00, %rcx
# Cond Negate
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 128(%r8), %rdx
setc %r9b
movq %rax, 120(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 136(%r8), %rax
setc %r9b
movq %rdx, 128(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 144(%r8), %rdx
setc %r9b
movq %rax, 136(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 152(%r8), %rax
setc %r9b
movq %rdx, 144(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 160(%r8), %rdx
setc %r9b
movq %rax, 152(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 168(%r8), %rax
setc %r9b
movq %rdx, 160(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 176(%r8), %rdx
setc %r9b
movq %rax, 168(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 184(%r8), %rax
setc %r9b
movq %rdx, 176(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 184(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_24@plt
#else
callq _sp_3072_sqr_24
#endif /* __APPLE__ */
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
addq $0xc0, %rsi
addq $0x180, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_24@plt
#else
callq _sp_3072_sqr_24
#endif /* __APPLE__ */
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_sqr_24@plt
#else
callq _sp_3072_sqr_24
#endif /* __APPLE__ */
#ifdef _WIN64
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
#endif /* _WIN64 */
movq 384(%rsp), %rsi
leaq 192(%rsp), %r8
addq $0x240, %rsi
movq $0x00, %rcx
movq -192(%r8), %rax
subq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq %rdx, 184(%r8)
sbbq $0x00, %rcx
subq $0x180, %rsi
movq -192(%r8), %rax
subq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq %rdx, 184(%r8)
sbbq $0x00, %rcx
movq 384(%rsp), %rdi
negq %rcx
addq $0x180, %rdi
movq -192(%rdi), %rax
subq -192(%r8), %rax
movq -184(%rdi), %rdx
movq %rax, -192(%rdi)
sbbq -184(%r8), %rdx
movq -176(%rdi), %rax
movq %rdx, -184(%rdi)
sbbq -176(%r8), %rax
movq -168(%rdi), %rdx
movq %rax, -176(%rdi)
sbbq -168(%r8), %rdx
movq -160(%rdi), %rax
movq %rdx, -168(%rdi)
sbbq -160(%r8), %rax
movq -152(%rdi), %rdx
movq %rax, -160(%rdi)
sbbq -152(%r8), %rdx
movq -144(%rdi), %rax
movq %rdx, -152(%rdi)
sbbq -144(%r8), %rax
movq -136(%rdi), %rdx
movq %rax, -144(%rdi)
sbbq -136(%r8), %rdx
movq -128(%rdi), %rax
movq %rdx, -136(%rdi)
sbbq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
sbbq 128(%r8), %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
sbbq 136(%r8), %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
sbbq 144(%r8), %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
sbbq 152(%r8), %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
sbbq 160(%r8), %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
sbbq 168(%r8), %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
sbbq 176(%r8), %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
sbbq 184(%r8), %rdx
movq %rdx, 184(%rdi)
sbbq $0x00, %rcx
movq 384(%rsp), %rdi
addq $0x240, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
adcq $0x00, %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
adcq $0x00, %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
adcq $0x00, %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
adcq $0x00, %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
adcq $0x00, %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
adcq $0x00, %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
adcq $0x00, %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
adcq $0x00, %rdx
movq %rdx, 184(%rdi)
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
addq $0x190, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_48,.-sp_3072_sqr_48
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* Karatsuba: ah^2, al^2, (al - ah)^2
*
* r A single precision integer.
* a A single precision integer.
*/
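/* Same schedule as sp_3072_sqr_48 above (see the C sketch there); the
 * only difference is that the three half-size squarings call the
 * AVX2/BMI2 variant sp_3072_sqr_avx2_24.
 */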
#ifndef __APPLE__
.text
.globl sp_3072_sqr_avx2_48
.type sp_3072_sqr_avx2_48,@function
.align 16
sp_3072_sqr_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_sqr_avx2_48
.p2align 4
_sp_3072_sqr_avx2_48:
#endif /* __APPLE__ */
subq $0x190, %rsp
movq %rdi, 384(%rsp)
movq %rsi, 392(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 192(%rsi), %r9
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq 128(%rsi), %rdx
movq %rax, 120(%r8)
sbbq 128(%r9), %rdx
movq 136(%rsi), %rax
movq %rdx, 128(%r8)
sbbq 136(%r9), %rax
movq 144(%rsi), %rdx
movq %rax, 136(%r8)
sbbq 144(%r9), %rdx
movq 152(%rsi), %rax
movq %rdx, 144(%r8)
sbbq 152(%r9), %rax
movq 160(%rsi), %rdx
movq %rax, 152(%r8)
sbbq 160(%r9), %rdx
movq 168(%rsi), %rax
movq %rdx, 160(%r8)
sbbq 168(%r9), %rax
movq 176(%rsi), %rdx
movq %rax, 168(%r8)
sbbq 176(%r9), %rdx
movq 184(%rsi), %rax
movq %rdx, 176(%r8)
sbbq 184(%r9), %rax
movq %rax, 184(%r8)
sbbq $0x00, %rcx
# Cond Negate
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 128(%r8), %rdx
setc %r9b
movq %rax, 120(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 136(%r8), %rax
setc %r9b
movq %rdx, 128(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 144(%r8), %rdx
setc %r9b
movq %rax, 136(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 152(%r8), %rax
setc %r9b
movq %rdx, 144(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 160(%r8), %rdx
setc %r9b
movq %rax, 152(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 168(%r8), %rax
setc %r9b
movq %rdx, 160(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 176(%r8), %rdx
setc %r9b
movq %rax, 168(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 184(%r8), %rax
setc %r9b
movq %rdx, 176(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 184(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_24@plt
#else
callq _sp_3072_sqr_avx2_24
#endif /* __APPLE__ */
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
addq $0xc0, %rsi
addq $0x180, %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_24@plt
#else
callq _sp_3072_sqr_avx2_24
#endif /* __APPLE__ */
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
#ifndef __APPLE__
callq sp_3072_sqr_avx2_24@plt
#else
callq _sp_3072_sqr_avx2_24
#endif /* __APPLE__ */
#ifdef _WIN64
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
#endif /* _WIN64 */
movq 384(%rsp), %rsi
leaq 192(%rsp), %r8
addq $0x240, %rsi
movq $0x00, %rcx
movq -192(%r8), %rax
subq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq %rdx, 184(%r8)
sbbq $0x00, %rcx
subq $0x180, %rsi
movq -192(%r8), %rax
subq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq %rdx, 184(%r8)
sbbq $0x00, %rcx
movq 384(%rsp), %rdi
negq %rcx
addq $0x180, %rdi
movq -192(%rdi), %rax
subq -192(%r8), %rax
movq -184(%rdi), %rdx
movq %rax, -192(%rdi)
sbbq -184(%r8), %rdx
movq -176(%rdi), %rax
movq %rdx, -184(%rdi)
sbbq -176(%r8), %rax
movq -168(%rdi), %rdx
movq %rax, -176(%rdi)
sbbq -168(%r8), %rdx
movq -160(%rdi), %rax
movq %rdx, -168(%rdi)
sbbq -160(%r8), %rax
movq -152(%rdi), %rdx
movq %rax, -160(%rdi)
sbbq -152(%r8), %rdx
movq -144(%rdi), %rax
movq %rdx, -152(%rdi)
sbbq -144(%r8), %rax
movq -136(%rdi), %rdx
movq %rax, -144(%rdi)
sbbq -136(%r8), %rdx
movq -128(%rdi), %rax
movq %rdx, -136(%rdi)
sbbq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
sbbq 128(%r8), %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
sbbq 136(%r8), %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
sbbq 144(%r8), %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
sbbq 152(%r8), %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
sbbq 160(%r8), %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
sbbq 168(%r8), %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
sbbq 176(%r8), %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
sbbq 184(%r8), %rdx
movq %rdx, 184(%rdi)
sbbq $0x00, %rcx
movq 384(%rsp), %rdi
addq $0x240, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
adcq $0x00, %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
adcq $0x00, %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
adcq $0x00, %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
adcq $0x00, %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
adcq $0x00, %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
adcq $0x00, %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
adcq $0x00, %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
adcq $0x00, %rdx
movq %rdx, 184(%rdi)
movq 392(%rsp), %rsi
movq 384(%rsp), %rdi
addq $0x190, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_sqr_avx2_48,.-sp_3072_sqr_avx2_48
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
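/* A plain-C model of this digit multiply (a sketch, not part of this
 * build). The assembly below fully unrolls the loop and rotates the
 * carry through %r8/%r9/%r10 instead of shifting a 128-bit accumulator:
 *
 *   #include <stdint.h>
 *
 *   void mul_d_48_ref(uint64_t r[49], const uint64_t a[48], uint64_t b)
 *   {
 *       unsigned __int128 t = 0;          // low: digit out, high: carry
 *       for (int i = 0; i < 48; i++) {
 *           t += (unsigned __int128)a[i] * b;  // 64x64 -> 128 plus carry
 *           r[i] = (uint64_t)t;
 *           t >>= 64;
 *       }
 *       r[48] = (uint64_t)t;              // 49th digit: final carry
 *   }
 */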
#ifndef __APPLE__
.text
.globl sp_3072_mul_d_48
.type sp_3072_mul_d_48,@function
.align 16
sp_3072_mul_d_48:
#else
.section __TEXT,__text
.globl _sp_3072_mul_d_48
.p2align 4
_sp_3072_mul_d_48:
#endif /* __APPLE__ */
movq %rdx, %rcx
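# b arrives in %rdx, but mulq writes %rdx:%rax, so keep the digit in %rcx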
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 120(%rsi)
addq %rax, %r8
movq %r8, 120(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[16] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 128(%rsi)
addq %rax, %r9
movq %r9, 128(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[17] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 136(%rsi)
addq %rax, %r10
movq %r10, 136(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[18] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 144(%rsi)
addq %rax, %r8
movq %r8, 144(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[19] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 152(%rsi)
addq %rax, %r9
movq %r9, 152(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[20] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 160(%rsi)
addq %rax, %r10
movq %r10, 160(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[21] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 168(%rsi)
addq %rax, %r8
movq %r8, 168(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[22] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 176(%rsi)
addq %rax, %r9
movq %r9, 176(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[23] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 184(%rsi)
addq %rax, %r10
movq %r10, 184(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[24] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 192(%rsi)
addq %rax, %r8
movq %r8, 192(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[25] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 200(%rsi)
addq %rax, %r9
movq %r9, 200(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[26] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 208(%rsi)
addq %rax, %r10
movq %r10, 208(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[27] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 216(%rsi)
addq %rax, %r8
movq %r8, 216(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[28] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 224(%rsi)
addq %rax, %r9
movq %r9, 224(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[29] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 232(%rsi)
addq %rax, %r10
movq %r10, 232(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[30] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 240(%rsi)
addq %rax, %r8
movq %r8, 240(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[31] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 248(%rsi)
addq %rax, %r9
movq %r9, 248(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[32] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 256(%rsi)
addq %rax, %r10
movq %r10, 256(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[33] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 264(%rsi)
addq %rax, %r8
movq %r8, 264(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[34] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 272(%rsi)
addq %rax, %r9
movq %r9, 272(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[35] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 280(%rsi)
addq %rax, %r10
movq %r10, 280(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[36] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 288(%rsi)
addq %rax, %r8
movq %r8, 288(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[37] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 296(%rsi)
addq %rax, %r9
movq %r9, 296(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[38] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 304(%rsi)
addq %rax, %r10
movq %r10, 304(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[39] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 312(%rsi)
addq %rax, %r8
movq %r8, 312(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[40] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 320(%rsi)
addq %rax, %r9
movq %r9, 320(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[41] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 328(%rsi)
addq %rax, %r10
movq %r10, 328(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[42] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 336(%rsi)
addq %rax, %r8
movq %r8, 336(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[43] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 344(%rsi)
addq %rax, %r9
movq %r9, 344(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[44] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 352(%rsi)
addq %rax, %r10
movq %r10, 352(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[45] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 360(%rsi)
addq %rax, %r8
movq %r8, 360(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[46] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 368(%rsi)
addq %rax, %r9
movq %r9, 368(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[47] * B
movq %rcx, %rax
mulq 376(%rsi)
addq %rax, %r10
adcq %rdx, %r8
movq %r10, 376(%rdi)
movq %r8, 384(%rdi)
repz retq
#ifndef __APPLE__
.size sp_3072_mul_d_48,.-sp_3072_mul_d_48
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
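/* A plain-C model of the constant-time contract (a sketch, not part of
 * this build): every word of b is masked and the borrow chain always
 * runs, independent of the mask value.
 *
 *   #include <stdint.h>
 *
 *   uint64_t cond_sub_24_ref(uint64_t r[24], const uint64_t a[24],
 *                            const uint64_t b[24], uint64_t m)
 *   {
 *       uint64_t borrow = 0;
 *       for (int i = 0; i < 24; i++) {
 *           unsigned __int128 t =
 *               (unsigned __int128)a[i] - (b[i] & m) - borrow;
 *           r[i] = (uint64_t)t;
 *           borrow = (uint64_t)(t >> 64) & 1;
 *       }
 *       return (uint64_t)0 - borrow;      // -1 on borrow, 0 otherwise
 *   }
 */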
#ifndef __APPLE__
.text
.globl sp_3072_cond_sub_24
.type sp_3072_cond_sub_24,@function
.align 16
sp_3072_cond_sub_24:
#else
.section __TEXT,__text
.globl _sp_3072_cond_sub_24
.p2align 4
_sp_3072_cond_sub_24:
#endif /* __APPLE__ */
subq $0xc0, %rsp
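# first pass: stage b & m on the stack so the sbbq chain reads fixed operands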
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
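# return value: -1 if the subtract borrowed, 0 otherwise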
sbbq %rax, %rax
addq $0xc0, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_cond_sub_24,.-sp_3072_cond_sub_24
#endif /* __APPLE__ */
/* Reduce the number back to 1536 bits (24 digits) using Montgomery reduction.
 *
 * a A single precision number to reduce in place.
 * m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64.
*/
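/* A plain-C model of the word-by-word reduction (a sketch, not part of
 * this build). a holds the 48-digit input; each pass zeroes a[i], and the
 * reduced value lands in a[0..23] via the final masked subtract, matching
 * the tail call to sp_3072_cond_sub_24 below. cond_sub_24_ref() is the
 * model shown above.
 *
 *   #include <stdint.h>
 *
 *   void mont_reduce_24_ref(uint64_t a[48], const uint64_t m[24],
 *                           uint64_t mp)
 *   {
 *       uint64_t over = 0;                  // carry beyond a[i + 24]
 *       for (int i = 0; i < 24; i++) {
 *           uint64_t mu = a[i] * mp;        // low 64 bits only
 *           unsigned __int128 t = 0;
 *           for (int j = 0; j < 24; j++) {  // a[i..i+23] += mu * m[j]
 *               t += (unsigned __int128)mu * m[j] + a[i + j];
 *               a[i + j] = (uint64_t)t;
 *               t >>= 64;
 *           }
 *           t += (unsigned __int128)a[i + 24] + over;
 *           a[i + 24] = (uint64_t)t;
 *           over = (uint64_t)(t >> 64);     // 0 or 1
 *       }
 *       cond_sub_24_ref(a, a + 24, m, (uint64_t)0 - over);
 *   }
 */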
#ifndef __APPLE__
.text
.globl sp_3072_mont_reduce_24
.type sp_3072_mont_reduce_24,@function
.align 16
sp_3072_mont_reduce_24:
#else
.section __TEXT,__text
.globl _sp_3072_mont_reduce_24
.p2align 4
_sp_3072_mont_reduce_24:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 24
movq $24, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
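# cache a[i] and a[i+1] in %r13/%r14 across loop iterations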
L_3072_mont_reduce_24_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 120(%rdi)
adcq $0x00, %r9
# a[i+16] += m[16] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 128(%rsi)
movq 128(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 128(%rdi)
adcq $0x00, %r10
# a[i+17] += m[17] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 136(%rsi)
movq 136(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 136(%rdi)
adcq $0x00, %r9
# a[i+18] += m[18] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 144(%rsi)
movq 144(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 144(%rdi)
adcq $0x00, %r10
# a[i+19] += m[19] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 152(%rsi)
movq 152(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 152(%rdi)
adcq $0x00, %r9
# a[i+20] += m[20] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 160(%rsi)
movq 160(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 160(%rdi)
adcq $0x00, %r10
# a[i+21] += m[21] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 168(%rsi)
movq 168(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 168(%rdi)
adcq $0x00, %r9
# a[i+22] += m[22] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 176(%rsi)
movq 176(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 176(%rdi)
adcq $0x00, %r10
# a[i+23] += m[23] * mu
movq %r11, %rax
mulq 184(%rsi)
movq 184(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 184(%rdi)
adcq %rdx, 192(%rdi)
adcq $0x00, %r15
# i -= 1; advance the window pointer one digit
addq $8, %rdi
decq %r8
jnz L_3072_mont_reduce_24_loop
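# write back the cached words: a[24] and a[25], the two low result digits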
movq %r13, (%rdi)
movq %r14, 8(%rdi)
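# turn the reduction carry in %r15 (0 or 1) into the 0/-1 subtract mask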
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
subq $0xc0, %rdi
#ifndef __APPLE__
callq sp_3072_cond_sub_24@plt
#else
callq _sp_3072_cond_sub_24
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mont_reduce_24,.-sp_3072_mont_reduce_24
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
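/* No stack staging is needed here: BMI2 pextq with an all-ones mask
 * returns its source unchanged and with an all-zero mask returns 0, so
 * "pextq %rcx, %r10, %r10" acts as a branch-free AND with the 0/-1 mask
 * in %rcx. The C model shown above for sp_3072_cond_sub_24 applies
 * unchanged.
 */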
#ifndef __APPLE__
.text
.globl sp_3072_cond_sub_avx2_24
.type sp_3072_cond_sub_avx2_24,@function
.align 16
sp_3072_cond_sub_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_cond_sub_avx2_24
.p2align 4
_sp_3072_cond_sub_avx2_24:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
sbbq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
sbbq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
sbbq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
sbbq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
sbbq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
sbbq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
sbbq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
sbbq %r8, %r10
movq %r10, 184(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_cond_sub_avx2_24,.-sp_3072_cond_sub_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
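/* Same shape as sp_3072_mul_d_48 above, unrolled for 24 digits with a
 * 25-digit result; the C model there applies with 48 replaced by 24.
 */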
#ifndef __APPLE__
.text
.globl sp_3072_mul_d_24
.type sp_3072_mul_d_24,@function
.align 16
sp_3072_mul_d_24:
#else
.section __TEXT,__text
.globl _sp_3072_mul_d_24
.p2align 4
_sp_3072_mul_d_24:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 120(%rsi)
addq %rax, %r8
movq %r8, 120(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[16] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 128(%rsi)
addq %rax, %r9
movq %r9, 128(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[17] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 136(%rsi)
addq %rax, %r10
movq %r10, 136(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[18] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 144(%rsi)
addq %rax, %r8
movq %r8, 144(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[19] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 152(%rsi)
addq %rax, %r9
movq %r9, 152(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[20] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 160(%rsi)
addq %rax, %r10
movq %r10, 160(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[21] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 168(%rsi)
addq %rax, %r8
movq %r8, 168(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[22] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 176(%rsi)
addq %rax, %r9
movq %r9, 176(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[23] * B
movq %rcx, %rax
mulq 184(%rsi)
addq %rax, %r10
adcq %rdx, %r8
movq %r10, 184(%rdi)
movq %r8, 192(%rdi)
repz retq
#ifndef __APPLE__
.size sp_3072_mul_d_24,.-sp_3072_mul_d_24
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a by the digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
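/* Functionally identical to sp_3072_mul_d_24 above (see the C sketch
 * there); the difference is the BMI2/ADX encoding. mulxq produces the
 * 64x64 -> 128-bit product without touching flags, so adcxq (carry via CF)
 * and adoxq (carry via OF) can run two independent carry chains per word
 * (k below stands for the word offset):
 *
 *   mulxq k(%rsi), %rcx, %r8    low half in %rcx, high half in %r8
 *   adcxq %rcx, <low word>      accumulate low via the CF chain
 *   adoxq %r8, <high word>      accumulate high via the OF chain
 */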
#ifndef __APPLE__
.text
.globl sp_3072_mul_d_avx2_24
.type sp_3072_mul_d_avx2_24,@function
.align 16
sp_3072_mul_d_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_mul_d_avx2_24
.p2align 4
_sp_3072_mul_d_avx2_24:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 120(%rdi)
# A[16] * B
mulxq 128(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 128(%rdi)
# A[17] * B
mulxq 136(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 136(%rdi)
# A[18] * B
mulxq 144(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 144(%rdi)
# A[19] * B
mulxq 152(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 152(%rdi)
# A[20] * B
mulxq 160(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 160(%rdi)
# A[21] * B
mulxq 168(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 168(%rdi)
# A[22] * B
mulxq 176(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 176(%rdi)
# A[23] * B
mulxq 184(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 184(%rdi)
movq %r9, 192(%rdi)
repz retq
#ifndef __APPLE__
.size sp_3072_mul_d_avx2_24,.-sp_3072_mul_d_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
* returns the result of the division.
*/
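/* Equivalent C (a sketch; assumes compiler support for unsigned
 * __int128). The divq below performs exactly this 128-by-64-bit division;
 * the caller must guarantee d1 < div, otherwise the quotient would not
 * fit in 64 bits and divq raises #DE:
 *
 *   static sp_digit div_word(sp_digit d1, sp_digit d0, sp_digit div)
 *   {
 *       return (sp_digit)((((unsigned __int128)d1 << 64) | d0) / div);
 *   }
 */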
#ifndef __APPLE__
.text
.globl div_3072_word_asm_24
.type div_3072_word_asm_24,@function
.align 16
div_3072_word_asm_24:
#else
.section __TEXT,__text
.globl _div_3072_word_asm_24
.p2align 4
_div_3072_word_asm_24:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_3072_word_asm_24,.-div_3072_word_asm_24
#endif /* __APPLE__ */
#endif /* _WIN64 */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
 * return A negative, zero or positive value when a is less than, equal to
 * or greater than b respectively.
*/
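/* A functional sketch in C (illustrative only; assumes typedef uint64_t
 * sp_digit). Words are scanned from most to least significant with an
 * "undecided" mask m that drops to zero at the first difference, so later
 * words cannot change the verdict; the asm below replaces the ifs with
 * cmova/cmovc/cmovnz so no branch depends on the data:
 *
 *   static sp_digit cmp_24(const sp_digit* a, const sp_digit* b)
 *   {
 *       sp_digit r = (sp_digit)-1;
 *       sp_digit m = (sp_digit)-1;
 *       int i;
 *       for (i = 23; i >= 0; i--) {
 *           sp_digit ai = a[i] & m;
 *           sp_digit bi = b[i] & m;
 *           if (ai > bi) r = 1;
 *           if (ai < bi) r = m;
 *           if (ai != bi) m = 0;
 *       }
 *       return r ^ m;
 *   }
 */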
#ifndef __APPLE__
.text
.globl sp_3072_cmp_24
.type sp_3072_cmp_24,@function
.align 16
sp_3072_cmp_24:
#else
.section __TEXT,__text
.globl _sp_3072_cmp_24
.p2align 4
_sp_3072_cmp_24:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 184(%rdi), %r9
movq 184(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 176(%rdi), %r9
movq 176(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 168(%rdi), %r9
movq 168(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 160(%rdi), %r9
movq 160(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 152(%rdi), %r9
movq 152(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 144(%rdi), %r9
movq 144(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 136(%rdi), %r9
movq 136(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 128(%rdi), %r9
movq 128(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_cmp_24,.-sp_3072_cmp_24
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
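/* Get entry idx from a table of 32 entries of 24 digits each, without the
 * memory access pattern depending on idx (cache-attack resistant). Every
 * entry is read; pcmpeqd builds an all-ones mask only where the running
 * counter equals idx, and pand/por accumulate that entry. A sketch in C
 * of the behaviour (illustrative only):
 *
 *   for (j = 0; j < 24; j++) r[j] = 0;
 *   for (i = 0; i < 32; i++) {
 *       sp_digit mask = (sp_digit)0 - (sp_digit)(i == idx);
 *       for (j = 0; j < 24; j++)
 *           r[j] |= table[i][j] & mask;
 *   }
 *
 * r      A single precision number to copy the entry into.
 * table  Array of pointers to the table entries.
 * idx    Index of the entry to get.
 */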
#ifndef __APPLE__
.text
.globl sp_3072_get_from_table_24
.type sp_3072_get_from_table_24,@function
.align 16
sp_3072_get_from_table_24:
#else
.section __TEXT,__text
.globl _sp_3072_get_from_table_24
.p2align 4
_sp_3072_get_from_table_24:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
pxor %xmm13, %xmm13
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm10, %xmm10
# START: 0-7
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 0-7
# START: 8-15
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 8-15
# START: 16-23
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
# END: 16-23
repz retq
#ifndef __APPLE__
.size sp_3072_get_from_table_24,.-sp_3072_get_from_table_24
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 3072 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
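/* A functional sketch in C of the word-serial reduction below
 * (illustrative only; assumes typedef uint64_t sp_digit and unsigned
 * __int128 support). On entry a holds 48 digits; each pass folds one low
 * digit into the modulus rows, and the reduced value ends up in the low
 * 24 digits:
 *
 *   sp_digit c = 0;
 *   int i, j;
 *   for (i = 0; i < 24; i++) {
 *       sp_digit mu = a[i] * mp;
 *       unsigned __int128 t = 0;
 *       for (j = 0; j < 24; j++) {
 *           t += (unsigned __int128)mu * m[j] + a[i + j];
 *           a[i + j] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       t += (unsigned __int128)a[i + 24] + c;
 *       a[i + 24] = (sp_digit)t;
 *       c = (sp_digit)(t >> 64);
 *   }
 *
 * followed by a masked subtract of m (mask = 0 - c) from a[24..47] into
 * a[0..23], which is what the branch-free pextq/sbbq tail implements.
 * The asm interleaves loads and stores and caches four words in
 * registers, but is functionally equivalent.
 */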
#ifndef __APPLE__
.text
.globl sp_3072_mont_reduce_avx2_24
.type sp_3072_mont_reduce_avx2_24,@function
.align 16
sp_3072_mont_reduce_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_mont_reduce_avx2_24
.p2align 4
_sp_3072_mont_reduce_avx2_24:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 24
movq $24, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0x60, %rdi
xorq %rbp, %rbp
L_3072_mont_reduce_avx2_24_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -64(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -56(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -56(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -48(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq -32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -40(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -32(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+16] += m[16] * mu
mulxq 128(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+17] += m[17] * mu
mulxq 136(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+18] += m[18] * mu
mulxq 144(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+19] += m[19] * mu
mulxq 152(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
# a[i+20] += m[20] * mu
mulxq 160(%rsi), %rax, %rcx
movq 72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 64(%rdi)
# a[i+21] += m[21] * mu
mulxq 168(%rsi), %rax, %rcx
movq 80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 72(%rdi)
# a[i+22] += m[22] * mu
mulxq 176(%rsi), %rax, %rcx
movq 88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 80(%rdi)
# a[i+23] += m[23] * mu
mulxq 184(%rsi), %rax, %rcx
movq 96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 88(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 96(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 1
addq $8, %rdi
# i -= 1
subq $0x01, %r9
jnz L_3072_mont_reduce_avx2_24_loop
subq $0x60, %rdi
negq %rbp
movq %rdi, %r8
subq $0xc0, %rdi
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq 128(%rsi), %rcx
movq 128(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 120(%rdi)
sbbq %rcx, %rax
movq 136(%rsi), %rdx
movq 136(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 128(%rdi)
sbbq %rdx, %rcx
movq 144(%rsi), %rax
movq 144(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 136(%rdi)
sbbq %rax, %rdx
movq 152(%rsi), %rcx
movq 152(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 144(%rdi)
sbbq %rcx, %rax
movq 160(%rsi), %rdx
movq 160(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 152(%rdi)
sbbq %rdx, %rcx
movq 168(%rsi), %rax
movq 168(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 160(%rdi)
sbbq %rax, %rdx
movq 176(%rsi), %rcx
movq 176(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 168(%rdi)
sbbq %rcx, %rax
movq 184(%rsi), %rdx
movq 184(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 176(%rdi)
sbbq %rdx, %rcx
movq %rcx, 184(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mont_reduce_avx2_24,.-sp_3072_mont_reduce_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
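/* Get entry idx from a table of 3072-bit numbers in constant time, as
 * sp_3072_get_from_table_24 above, but using AVX2: vpermd broadcasts idx
 * and the per-entry increment across all dword lanes, and each entry is
 * accumulated through 256-bit ymm registers instead of 128-bit xmm ones.
 *
 * r      A single precision number to copy the entry into.
 * table  Array of pointers to the table entries.
 * idx    Index of the entry to get.
 */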
#ifndef __APPLE__
.text
.globl sp_3072_get_from_table_avx2_24
.type sp_3072_get_from_table_avx2_24,@function
.align 16
sp_3072_get_from_table_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_get_from_table_avx2_24
.p2align 4
_sp_3072_get_from_table_avx2_24:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
vpxor %ymm13, %ymm13, %ymm13
vpermd %ymm10, %ymm13, %ymm10
vpermd %ymm11, %ymm13, %ymm11
# START: 0-15
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 16
movq 128(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 17
movq 136(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 18
movq 144(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 19
movq 152(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 20
movq 160(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 21
movq 168(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 22
movq 176(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 23
movq 184(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 24
movq 192(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 25
movq 200(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 26
movq 208(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 27
movq 216(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 28
movq 224(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 29
movq 232(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 30
movq 240(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 31
movq 248(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 0-15
# START: 16-23
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 16
movq 128(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 17
movq 136(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 18
movq 144(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 19
movq 152(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 20
movq 160(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 21
movq 168(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 22
movq 176(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 23
movq 184(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 24
movq 192(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 25
movq 200(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 26
movq 208(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 27
movq 216(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 28
movq 224(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 29
movq 232(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 30
movq 240(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 31
movq 248(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
# END: 16-23
repz retq
#ifndef __APPLE__
.size sp_3072_get_from_table_avx2_24,.-sp_3072_get_from_table_avx2_24
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Conditionally subtract b from a using the mask m.
 * m is all ones (-1) to subtract and 0 to leave a unchanged.
 *
 * r A single precision number holding the conditional subtract result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
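/* For reference, a minimal C sketch of the same contract (the name
 * cond_sub_48_ref and the loop form are illustrative assumptions, not the
 * generated API; the assembly below instead masks b into a stack buffer and
 * then runs a single subq/sbbq chain):
 *
 *   #include <stdint.h>
 *
 *   uint64_t cond_sub_48_ref(uint64_t* r, const uint64_t* a,
 *                            const uint64_t* b, uint64_t m)
 *   {
 *       uint64_t borrow = 0;
 *       for (int i = 0; i < 48; i++) {
 *           uint64_t bi = b[i] & m;              // all bits of b[i] or none
 *           uint64_t d  = a[i] - bi;
 *           uint64_t ob = (a[i] < bi) | (d < borrow);
 *           r[i] = d - borrow;
 *           borrow = ob;
 *       }
 *       return (uint64_t)0 - borrow;             // 0 or -1, as sbbq %rax,%rax
 *   }
 */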
#ifndef __APPLE__
.text
.globl sp_3072_cond_sub_48
.type sp_3072_cond_sub_48,@function
.align 16
sp_3072_cond_sub_48:
#else
.section __TEXT,__text
.globl _sp_3072_cond_sub_48
.p2align 4
_sp_3072_cond_sub_48:
#endif /* __APPLE__ */
subq $0x180, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq 192(%rdx), %r8
movq 200(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 192(%rsp)
movq %r9, 200(%rsp)
movq 208(%rdx), %r8
movq 216(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 208(%rsp)
movq %r9, 216(%rsp)
movq 224(%rdx), %r8
movq 232(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 224(%rsp)
movq %r9, 232(%rsp)
movq 240(%rdx), %r8
movq 248(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 240(%rsp)
movq %r9, 248(%rsp)
movq 256(%rdx), %r8
movq 264(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 256(%rsp)
movq %r9, 264(%rsp)
movq 272(%rdx), %r8
movq 280(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 272(%rsp)
movq %r9, 280(%rsp)
movq 288(%rdx), %r8
movq 296(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 288(%rsp)
movq %r9, 296(%rsp)
movq 304(%rdx), %r8
movq 312(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 304(%rsp)
movq %r9, 312(%rsp)
movq 320(%rdx), %r8
movq 328(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 320(%rsp)
movq %r9, 328(%rsp)
movq 336(%rdx), %r8
movq 344(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 336(%rsp)
movq %r9, 344(%rsp)
movq 352(%rdx), %r8
movq 360(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 352(%rsp)
movq %r9, 360(%rsp)
movq 368(%rdx), %r8
movq 376(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 368(%rsp)
movq %r9, 376(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 176(%rdi)
movq 192(%rsi), %r8
movq 192(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 184(%rdi)
movq 200(%rsi), %r9
movq 200(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 192(%rdi)
movq 208(%rsi), %r8
movq 208(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 200(%rdi)
movq 216(%rsi), %r9
movq 216(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 208(%rdi)
movq 224(%rsi), %r8
movq 224(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 216(%rdi)
movq 232(%rsi), %r9
movq 232(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 224(%rdi)
movq 240(%rsi), %r8
movq 240(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 232(%rdi)
movq 248(%rsi), %r9
movq 248(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 240(%rdi)
movq 256(%rsi), %r8
movq 256(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 248(%rdi)
movq 264(%rsi), %r9
movq 264(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 256(%rdi)
movq 272(%rsi), %r8
movq 272(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 264(%rdi)
movq 280(%rsi), %r9
movq 280(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 272(%rdi)
movq 288(%rsi), %r8
movq 288(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 280(%rdi)
movq 296(%rsi), %r9
movq 296(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 288(%rdi)
movq 304(%rsi), %r8
movq 304(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 296(%rdi)
movq 312(%rsi), %r9
movq 312(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 304(%rdi)
movq 320(%rsi), %r8
movq 320(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 312(%rdi)
movq 328(%rsi), %r9
movq 328(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 320(%rdi)
movq 336(%rsi), %r8
movq 336(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 328(%rdi)
movq 344(%rsi), %r9
movq 344(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 336(%rdi)
movq 352(%rsi), %r8
movq 352(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 344(%rdi)
movq 360(%rsi), %r9
movq 360(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 352(%rdi)
movq 368(%rsi), %r8
movq 368(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 360(%rdi)
movq 376(%rsi), %r9
movq 376(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 368(%rdi)
movq %r9, 376(%rdi)
sbbq %rax, %rax
addq $0x180, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_cond_sub_48,.-sp_3072_cond_sub_48
#endif /* __APPLE__ */
/* Reduce the number back to 3072 bits using Montgomery reduction.
 *
 * a A single precision number to reduce in place.
 * m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64 (the word
 * size).
 */
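/* A hedged C sketch of the word-wise algorithm (reference shape only; the
 * helper name and the unsigned __int128 type are assumptions made for
 * clarity, while the assembly below keeps every carry in registers):
 *
 *   #include <stdint.h>
 *   typedef unsigned __int128 u128;
 *
 *   void mont_reduce_48_ref(uint64_t* a, const uint64_t* m, uint64_t mp)
 *   {
 *       uint64_t over = 0;                       // %r15 in the assembly
 *       for (int i = 0; i < 48; i++) {
 *           uint64_t mu = a[i] * mp;             // mu = a[i] * -1/m mod 2^64
 *           u128 carry = 0;
 *           for (int j = 0; j < 48; j++) {       // a += mu * m, shifted by i
 *               u128 t = (u128)mu * m[j] + a[i + j] + carry;
 *               a[i + j] = (uint64_t)t;
 *               carry = t >> 64;
 *           }
 *           u128 t = (u128)a[i + 48] + carry + over;
 *           a[i + 48] = (uint64_t)t;
 *           over = (uint64_t)(t >> 64);
 *       }
 *       // Tail: mask = 0 - over, then a conditional subtract of m, which
 *       // is what the call to sp_3072_cond_sub_48 below performs.
 *   }
 */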
#ifndef __APPLE__
.text
.globl sp_3072_mont_reduce_48
.type sp_3072_mont_reduce_48,@function
.align 16
sp_3072_mont_reduce_48:
#else
.section __TEXT,__text
.globl _sp_3072_mont_reduce_48
.p2align 4
_sp_3072_mont_reduce_48:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 48
movq $48, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_3072_mont_reduce_48_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 120(%rdi)
adcq $0x00, %r9
# a[i+16] += m[16] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 128(%rsi)
movq 128(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 128(%rdi)
adcq $0x00, %r10
# a[i+17] += m[17] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 136(%rsi)
movq 136(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 136(%rdi)
adcq $0x00, %r9
# a[i+18] += m[18] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 144(%rsi)
movq 144(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 144(%rdi)
adcq $0x00, %r10
# a[i+19] += m[19] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 152(%rsi)
movq 152(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 152(%rdi)
adcq $0x00, %r9
# a[i+20] += m[20] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 160(%rsi)
movq 160(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 160(%rdi)
adcq $0x00, %r10
# a[i+21] += m[21] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 168(%rsi)
movq 168(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 168(%rdi)
adcq $0x00, %r9
# a[i+22] += m[22] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 176(%rsi)
movq 176(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 176(%rdi)
adcq $0x00, %r10
# a[i+23] += m[23] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 184(%rsi)
movq 184(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 184(%rdi)
adcq $0x00, %r9
# a[i+24] += m[24] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 192(%rsi)
movq 192(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 192(%rdi)
adcq $0x00, %r10
# a[i+25] += m[25] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 200(%rsi)
movq 200(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 200(%rdi)
adcq $0x00, %r9
# a[i+26] += m[26] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 208(%rsi)
movq 208(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 208(%rdi)
adcq $0x00, %r10
# a[i+27] += m[27] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 216(%rsi)
movq 216(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 216(%rdi)
adcq $0x00, %r9
# a[i+28] += m[28] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 224(%rsi)
movq 224(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 224(%rdi)
adcq $0x00, %r10
# a[i+29] += m[29] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 232(%rsi)
movq 232(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 232(%rdi)
adcq $0x00, %r9
# a[i+30] += m[30] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 240(%rsi)
movq 240(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 240(%rdi)
adcq $0x00, %r10
# a[i+31] += m[31] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 248(%rsi)
movq 248(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 248(%rdi)
adcq $0x00, %r9
# a[i+32] += m[32] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 256(%rsi)
movq 256(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 256(%rdi)
adcq $0x00, %r10
# a[i+33] += m[33] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 264(%rsi)
movq 264(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 264(%rdi)
adcq $0x00, %r9
# a[i+34] += m[34] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 272(%rsi)
movq 272(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 272(%rdi)
adcq $0x00, %r10
# a[i+35] += m[35] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 280(%rsi)
movq 280(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 280(%rdi)
adcq $0x00, %r9
# a[i+36] += m[36] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 288(%rsi)
movq 288(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 288(%rdi)
adcq $0x00, %r10
# a[i+37] += m[37] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 296(%rsi)
movq 296(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 296(%rdi)
adcq $0x00, %r9
# a[i+38] += m[38] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 304(%rsi)
movq 304(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 304(%rdi)
adcq $0x00, %r10
# a[i+39] += m[39] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 312(%rsi)
movq 312(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 312(%rdi)
adcq $0x00, %r9
# a[i+40] += m[40] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 320(%rsi)
movq 320(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 320(%rdi)
adcq $0x00, %r10
# a[i+41] += m[41] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 328(%rsi)
movq 328(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 328(%rdi)
adcq $0x00, %r9
# a[i+42] += m[42] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 336(%rsi)
movq 336(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 336(%rdi)
adcq $0x00, %r10
# a[i+43] += m[43] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 344(%rsi)
movq 344(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 344(%rdi)
adcq $0x00, %r9
# a[i+44] += m[44] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 352(%rsi)
movq 352(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 352(%rdi)
adcq $0x00, %r10
# a[i+45] += m[45] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 360(%rsi)
movq 360(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 360(%rdi)
adcq $0x00, %r9
# a[i+46] += m[46] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 368(%rsi)
movq 368(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 368(%rdi)
adcq $0x00, %r10
# a[i+47] += m[47] * mu
movq %r11, %rax
mulq 376(%rsi)
movq 376(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 376(%rdi)
adcq %rdx, 384(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_3072_mont_reduce_48_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
subq $0x180, %rdi
#ifndef __APPLE__
callq sp_3072_cond_sub_48@plt
#else
callq _sp_3072_cond_sub_48
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mont_reduce_48,.-sp_3072_mont_reduce_48
#endif /* __APPLE__ */
/* Subtract b from a into r. (r = a - b)
 *
 * r A single precision integer.
 * a A single precision integer.
 * b A single precision integer.
 */
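/* The C equivalent is a plain 48-word borrow chain (sub_48_ref is an
 * illustrative name, not part of the generated API):
 *
 *   #include <stdint.h>
 *
 *   uint64_t sub_48_ref(uint64_t* r, const uint64_t* a, const uint64_t* b)
 *   {
 *       uint64_t borrow = 0;
 *       for (int i = 0; i < 48; i++) {
 *           uint64_t d  = a[i] - b[i];
 *           uint64_t ob = (a[i] < b[i]) | (d < borrow);
 *           r[i] = d - borrow;
 *           borrow = ob;
 *       }
 *       return (uint64_t)0 - borrow;     // final borrow out: 0 or -1
 *   }
 */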
#ifndef __APPLE__
.text
.globl sp_3072_sub_48
.type sp_3072_sub_48,@function
.align 16
sp_3072_sub_48:
#else
.section __TEXT,__text
.globl _sp_3072_sub_48
.p2align 4
_sp_3072_sub_48:
#endif /* __APPLE__ */
movq (%rsi), %rcx
subq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
sbbq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
sbbq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
sbbq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
sbbq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
sbbq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
sbbq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
sbbq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
sbbq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
sbbq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
sbbq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
sbbq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
sbbq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
sbbq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
sbbq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
sbbq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
sbbq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
sbbq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
sbbq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
sbbq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
sbbq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
sbbq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
sbbq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
sbbq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
sbbq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
sbbq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
sbbq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
sbbq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
sbbq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
sbbq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
sbbq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
sbbq 248(%rdx), %r8
movq 256(%rsi), %rcx
movq %r8, 248(%rdi)
sbbq 256(%rdx), %rcx
movq 264(%rsi), %r8
movq %rcx, 256(%rdi)
sbbq 264(%rdx), %r8
movq 272(%rsi), %rcx
movq %r8, 264(%rdi)
sbbq 272(%rdx), %rcx
movq 280(%rsi), %r8
movq %rcx, 272(%rdi)
sbbq 280(%rdx), %r8
movq 288(%rsi), %rcx
movq %r8, 280(%rdi)
sbbq 288(%rdx), %rcx
movq 296(%rsi), %r8
movq %rcx, 288(%rdi)
sbbq 296(%rdx), %r8
movq 304(%rsi), %rcx
movq %r8, 296(%rdi)
sbbq 304(%rdx), %rcx
movq 312(%rsi), %r8
movq %rcx, 304(%rdi)
sbbq 312(%rdx), %r8
movq 320(%rsi), %rcx
movq %r8, 312(%rdi)
sbbq 320(%rdx), %rcx
movq 328(%rsi), %r8
movq %rcx, 320(%rdi)
sbbq 328(%rdx), %r8
movq 336(%rsi), %rcx
movq %r8, 328(%rdi)
sbbq 336(%rdx), %rcx
movq 344(%rsi), %r8
movq %rcx, 336(%rdi)
sbbq 344(%rdx), %r8
movq 352(%rsi), %rcx
movq %r8, 344(%rdi)
sbbq 352(%rdx), %rcx
movq 360(%rsi), %r8
movq %rcx, 352(%rdi)
sbbq 360(%rdx), %r8
movq 368(%rsi), %rcx
movq %r8, 360(%rdi)
sbbq 368(%rdx), %rcx
movq 376(%rsi), %r8
movq %rcx, 368(%rdi)
sbbq 376(%rdx), %r8
movq %r8, 376(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_sub_48,.-sp_3072_sub_48
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a by the digit b into r. (r = a * b)
 *
 * r A single precision integer.
 * a A single precision integer.
 * b A single precision digit.
 */
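/* A minimal C sketch of the result (names are illustrative; the assembly
 * below uses mulx with alternating adcx/adox carry chains instead of a
 * 128-bit temporary):
 *
 *   #include <stdint.h>
 *   typedef unsigned __int128 u128;
 *
 *   void mul_d_48_ref(uint64_t* r, const uint64_t* a, uint64_t b)
 *   {
 *       uint64_t carry = 0;
 *       for (int i = 0; i < 48; i++) {
 *           u128 t = (u128)a[i] * b + carry;
 *           r[i] = (uint64_t)t;
 *           carry = (uint64_t)(t >> 64);
 *       }
 *       r[48] = carry;                   // 49th word holds the overflow
 *   }
 */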
#ifndef __APPLE__
.text
.globl sp_3072_mul_d_avx2_48
.type sp_3072_mul_d_avx2_48,@function
.align 16
sp_3072_mul_d_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_mul_d_avx2_48
.p2align 4
_sp_3072_mul_d_avx2_48:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 120(%rdi)
# A[16] * B
mulxq 128(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 128(%rdi)
# A[17] * B
mulxq 136(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 136(%rdi)
# A[18] * B
mulxq 144(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 144(%rdi)
# A[19] * B
mulxq 152(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 152(%rdi)
# A[20] * B
mulxq 160(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 160(%rdi)
# A[21] * B
mulxq 168(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 168(%rdi)
# A[22] * B
mulxq 176(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 176(%rdi)
# A[23] * B
mulxq 184(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 184(%rdi)
# A[24] * B
mulxq 192(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 192(%rdi)
# A[25] * B
mulxq 200(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 200(%rdi)
# A[26] * B
mulxq 208(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 208(%rdi)
# A[27] * B
mulxq 216(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 216(%rdi)
# A[28] * B
mulxq 224(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 224(%rdi)
# A[29] * B
mulxq 232(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 232(%rdi)
# A[30] * B
mulxq 240(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 240(%rdi)
# A[31] * B
mulxq 248(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 248(%rdi)
# A[32] * B
mulxq 256(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 256(%rdi)
# A[33] * B
mulxq 264(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 264(%rdi)
# A[34] * B
mulxq 272(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 272(%rdi)
# A[35] * B
mulxq 280(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 280(%rdi)
# A[36] * B
mulxq 288(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 288(%rdi)
# A[37] * B
mulxq 296(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 296(%rdi)
# A[38] * B
mulxq 304(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 304(%rdi)
# A[39] * B
mulxq 312(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 312(%rdi)
# A[40] * B
mulxq 320(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 320(%rdi)
# A[41] * B
mulxq 328(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 328(%rdi)
# A[42] * B
mulxq 336(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 336(%rdi)
# A[43] * B
mulxq 344(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 344(%rdi)
# A[44] * B
mulxq 352(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 352(%rdi)
# A[45] * B
mulxq 360(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 360(%rdi)
# A[46] * B
mulxq 368(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 368(%rdi)
# A[47] * B
mulxq 376(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 376(%rdi)
movq %r9, 384(%rdi)
repz retq
#ifndef __APPLE__
.size sp_3072_mul_d_avx2_48,.-sp_3072_mul_d_avx2_48
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
 * returns the quotient of the division.
 */
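/* As a hedged C sketch, this is the 128-by-64 bit division that divq
 * performs (div_word_ref is an illustrative name; note divq raises #DE
 * unless d1 < div, so the quotient must fit in 64 bits):
 *
 *   #include <stdint.h>
 *   typedef unsigned __int128 u128;
 *
 *   uint64_t div_word_ref(uint64_t d1, uint64_t d0, uint64_t div)
 *   {
 *       u128 n = ((u128)d1 << 64) | d0;
 *       return (uint64_t)(n / div);
 *   }
 */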
#ifndef __APPLE__
.text
.globl div_3072_word_asm_48
.type div_3072_word_asm_48,@function
.align 16
div_3072_word_asm_48:
#else
.section __TEXT,__text
.globl _div_3072_word_asm_48
.p2align 4
_div_3072_word_asm_48:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_3072_word_asm_48,.-div_3072_word_asm_48
#endif /* __APPLE__ */
#endif /* _WIN64 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is all ones (-1) to subtract and 0 to leave a unchanged.
 *
 * r A single precision number holding the conditional subtract result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
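/* Note on the pextq idiom below: BMI2 PEXT gathers the bits of its source
 * selected by the mask. With a mask of all ones it returns the source
 * unchanged and with a mask of zero it returns 0, so
 *     pextq %rcx, %rX, %rX
 * serves as the branch-free "b[i] & m" of the generic version above, in C:
 *
 *   uint64_t bi = b[i] & m;     // m is 0 or all ones
 */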
#ifndef __APPLE__
.text
.globl sp_3072_cond_sub_avx2_48
.type sp_3072_cond_sub_avx2_48,@function
.align 16
sp_3072_cond_sub_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_cond_sub_avx2_48
.p2align 4
_sp_3072_cond_sub_avx2_48:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
sbbq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
sbbq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
sbbq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
sbbq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
sbbq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
sbbq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
sbbq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
sbbq %r8, %r10
movq 192(%rdx), %r9
movq 192(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 184(%rdi)
sbbq %r9, %r8
movq 200(%rdx), %r10
movq 200(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 192(%rdi)
sbbq %r10, %r9
movq 208(%rdx), %r8
movq 208(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 200(%rdi)
sbbq %r8, %r10
movq 216(%rdx), %r9
movq 216(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 208(%rdi)
sbbq %r9, %r8
movq 224(%rdx), %r10
movq 224(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 216(%rdi)
sbbq %r10, %r9
movq 232(%rdx), %r8
movq 232(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 224(%rdi)
sbbq %r8, %r10
movq 240(%rdx), %r9
movq 240(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 232(%rdi)
sbbq %r9, %r8
movq 248(%rdx), %r10
movq 248(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 240(%rdi)
sbbq %r10, %r9
movq 256(%rdx), %r8
movq 256(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 248(%rdi)
sbbq %r8, %r10
movq 264(%rdx), %r9
movq 264(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 256(%rdi)
sbbq %r9, %r8
movq 272(%rdx), %r10
movq 272(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 264(%rdi)
sbbq %r10, %r9
movq 280(%rdx), %r8
movq 280(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 272(%rdi)
sbbq %r8, %r10
movq 288(%rdx), %r9
movq 288(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 280(%rdi)
sbbq %r9, %r8
movq 296(%rdx), %r10
movq 296(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 288(%rdi)
sbbq %r10, %r9
movq 304(%rdx), %r8
movq 304(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 296(%rdi)
sbbq %r8, %r10
movq 312(%rdx), %r9
movq 312(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 304(%rdi)
sbbq %r9, %r8
movq 320(%rdx), %r10
movq 320(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 312(%rdi)
sbbq %r10, %r9
movq 328(%rdx), %r8
movq 328(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 320(%rdi)
sbbq %r8, %r10
movq 336(%rdx), %r9
movq 336(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 328(%rdi)
sbbq %r9, %r8
movq 344(%rdx), %r10
movq 344(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 336(%rdi)
sbbq %r10, %r9
movq 352(%rdx), %r8
movq 352(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 344(%rdi)
sbbq %r8, %r10
movq 360(%rdx), %r9
movq 360(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 352(%rdi)
sbbq %r9, %r8
movq 368(%rdx), %r10
movq 368(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 360(%rdi)
sbbq %r10, %r9
movq 376(%rdx), %r8
movq 376(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 368(%rdi)
sbbq %r8, %r10
movq %r10, 376(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_cond_sub_avx2_48,.-sp_3072_cond_sub_avx2_48
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Compare a with b in constant time.
 *
 * a A single precision integer.
 * b A single precision integer.
 * return -1, 0 or 1 when a is less than, equal to or greater than b
 * respectively.
 */
*/
#ifndef __APPLE__
.text
.globl sp_3072_cmp_48
.type sp_3072_cmp_48,@function
.align 16
sp_3072_cmp_48:
#else
.section __TEXT,__text
.globl _sp_3072_cmp_48
.p2align 4
_sp_3072_cmp_48:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 376(%rdi), %r9
movq 376(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 368(%rdi), %r9
movq 368(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 360(%rdi), %r9
movq 360(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 352(%rdi), %r9
movq 352(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 344(%rdi), %r9
movq 344(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 336(%rdi), %r9
movq 336(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 328(%rdi), %r9
movq 328(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 320(%rdi), %r9
movq 320(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 312(%rdi), %r9
movq 312(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 304(%rdi), %r9
movq 304(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 296(%rdi), %r9
movq 296(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 288(%rdi), %r9
movq 288(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 280(%rdi), %r9
movq 280(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 272(%rdi), %r9
movq 272(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 264(%rdi), %r9
movq 264(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 256(%rdi), %r9
movq 256(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 248(%rdi), %r9
movq 248(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 240(%rdi), %r9
movq 240(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 232(%rdi), %r9
movq 232(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 224(%rdi), %r9
movq 224(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 216(%rdi), %r9
movq 216(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 208(%rdi), %r9
movq 208(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 200(%rdi), %r9
movq 200(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 192(%rdi), %r9
movq 192(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 184(%rdi), %r9
movq 184(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 176(%rdi), %r9
movq 176(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 168(%rdi), %r9
movq 168(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 160(%rdi), %r9
movq 160(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 152(%rdi), %r9
movq 152(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 144(%rdi), %r9
movq 144(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 136(%rdi), %r9
movq 136(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 128(%rdi), %r9
movq 128(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_cmp_48,.-sp_3072_cmp_48
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
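/* Constant-time (cache-resistant) table lookup: every entry pointed to by
 * the table is read on every call, and the wanted one is selected by
 * comparing a running counter against the index, ANDing each entry with
 * the resulting mask and ORing it into the accumulator. A hedged C sketch
 * of the idea (names and the pointer-table shape are illustrative):
 *
 *   #include <stdint.h>
 *
 *   void get_from_table_ref(uint64_t* r, const uint64_t** table,
 *                           uint64_t idx, int entries, int words)
 *   {
 *       for (int j = 0; j < words; j++)
 *           r[j] = 0;
 *       for (int e = 0; e < entries; e++) {
 *           uint64_t mask = (uint64_t)0 - ((uint64_t)e == idx);
 *           for (int j = 0; j < words; j++)
 *               r[j] |= table[e][j] & mask;
 *       }
 *   }
 */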
#ifndef __APPLE__
.text
.globl sp_3072_get_from_table_48
.type sp_3072_get_from_table_48,@function
.align 16
sp_3072_get_from_table_48:
#else
.section __TEXT,__text
.globl _sp_3072_get_from_table_48
.p2align 4
_sp_3072_get_from_table_48:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
pxor %xmm13, %xmm13
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm10, %xmm10
# START: 0-7
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 0-7
# START: 8-15
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 8-15
# START: 16-23
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 16-23
# START: 24-31
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 24-31
# START: 32-39
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 32-39
# START: 40-47
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
# END: 40-47
repz retq
#ifndef __APPLE__
.size sp_3072_get_from_table_48,.-sp_3072_get_from_table_48
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 3072 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^64 (the digit size).
*/
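/* A rough C sketch of the word-by-word loop below (sp_digit and the
 * fixed 48-digit size are assumed for illustration; the generated code
 * also caches a[i..i+3] in r12..r15 and schedules the multiplies with
 * MULX/ADCX/ADOX):
 *
 *   sp_digit carry = 0;
 *   for (i = 0; i < 48; i++) {
 *       sp_digit mu = a[i] * mp;                    // low 64 bits only
 *       sp_digit c = 0;
 *       for (j = 0; j < 48; j++) {                  // a[i..] += mu * m
 *           unsigned __int128 t = (unsigned __int128)mu * m[j]
 *                               + a[i + j] + c;
 *           a[i + j] = (sp_digit)t;
 *           c = (sp_digit)(t >> 64);
 *       }
 *       unsigned __int128 t = (unsigned __int128)a[i + 48] + c + carry;
 *       a[i + 48] = (sp_digit)t;
 *       carry = (sp_digit)(t >> 64);
 *   }
 *   // The reduced value now sits in a[48..95]; it is copied down to
 *   // a[0..47] while m is subtracted exactly when carry is set (see
 *   // the pextq mask at the end of the function).
 */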
#ifndef __APPLE__
.text
.globl sp_3072_mont_reduce_avx2_48
.type sp_3072_mont_reduce_avx2_48,@function
.align 16
sp_3072_mont_reduce_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_mont_reduce_avx2_48
.p2align 4
_sp_3072_mont_reduce_avx2_48:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 48
movq $48, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0xc0, %rdi
xorq %rbp, %rbp
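# r9 counts i down from 48; r12..r15 cache a[i..i+3] across iterations
# and rbp accumulates the carry out of the top word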
L_3072_mont_reduce_avx2_48_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -160(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -152(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -144(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -152(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -136(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -144(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq -128(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -136(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq -120(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -128(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq -112(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -120(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq -104(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -112(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq -96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -104(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq -88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -96(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq -80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -88(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq -72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -80(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq -64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -72(%rdi)
# a[i+16] += m[16] * mu
mulxq 128(%rsi), %rax, %rcx
movq -56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -64(%rdi)
# a[i+17] += m[17] * mu
mulxq 136(%rsi), %rax, %rcx
movq -48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -56(%rdi)
# a[i+18] += m[18] * mu
mulxq 144(%rsi), %rax, %rcx
movq -40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -48(%rdi)
# a[i+19] += m[19] * mu
mulxq 152(%rsi), %rax, %rcx
movq -32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -40(%rdi)
# a[i+20] += m[20] * mu
mulxq 160(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -32(%rdi)
# a[i+21] += m[21] * mu
mulxq 168(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+22] += m[22] * mu
mulxq 176(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+23] += m[23] * mu
mulxq 184(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+24] += m[24] * mu
mulxq 192(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+25] += m[25] * mu
mulxq 200(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+26] += m[26] * mu
mulxq 208(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+27] += m[27] * mu
mulxq 216(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+28] += m[28] * mu
mulxq 224(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+29] += m[29] * mu
mulxq 232(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+30] += m[30] * mu
mulxq 240(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+31] += m[31] * mu
mulxq 248(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
# a[i+32] += m[32] * mu
mulxq 256(%rsi), %rax, %rcx
movq 72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 64(%rdi)
# a[i+33] += m[33] * mu
mulxq 264(%rsi), %rax, %rcx
movq 80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 72(%rdi)
# a[i+34] += m[34] * mu
mulxq 272(%rsi), %rax, %rcx
movq 88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 80(%rdi)
# a[i+35] += m[35] * mu
mulxq 280(%rsi), %rax, %rcx
movq 96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 88(%rdi)
# a[i+36] += m[36] * mu
mulxq 288(%rsi), %rax, %rcx
movq 104(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rdi)
# a[i+37] += m[37] * mu
mulxq 296(%rsi), %rax, %rcx
movq 112(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 104(%rdi)
# a[i+38] += m[38] * mu
mulxq 304(%rsi), %rax, %rcx
movq 120(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 112(%rdi)
# a[i+39] += m[39] * mu
mulxq 312(%rsi), %rax, %rcx
movq 128(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 120(%rdi)
# a[i+40] += m[40] * mu
mulxq 320(%rsi), %rax, %rcx
movq 136(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 128(%rdi)
# a[i+41] += m[41] * mu
mulxq 328(%rsi), %rax, %rcx
movq 144(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 136(%rdi)
# a[i+42] += m[42] * mu
mulxq 336(%rsi), %rax, %rcx
movq 152(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 144(%rdi)
# a[i+43] += m[43] * mu
mulxq 344(%rsi), %rax, %rcx
movq 160(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 152(%rdi)
# a[i+44] += m[44] * mu
mulxq 352(%rsi), %rax, %rcx
movq 168(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 160(%rdi)
# a[i+45] += m[45] * mu
mulxq 360(%rsi), %rax, %rcx
movq 176(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 168(%rdi)
# a[i+46] += m[46] * mu
mulxq 368(%rsi), %rax, %rcx
movq 184(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 176(%rdi)
# a[i+47] += m[47] * mu
mulxq 376(%rsi), %rax, %rcx
movq 192(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 184(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 192(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 1
addq $8, %rdi
# i -= 1
subq $0x01, %r9
jnz L_3072_mont_reduce_avx2_48_loop
subq $0xc0, %rdi
negq %rbp
movq %rdi, %r8
subq $0x180, %rdi
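# rbp is now all-ones if a final subtraction of m is required, else zero;
# pextq with that mask yields m[j] or 0, keeping the subtract branch-free.
# The reduced value sits in a[48..95] (first four words still cached in
# r12..r15) and is written back down to a[0..47].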
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq 128(%rsi), %rcx
movq 128(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 120(%rdi)
sbbq %rcx, %rax
movq 136(%rsi), %rdx
movq 136(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 128(%rdi)
sbbq %rdx, %rcx
movq 144(%rsi), %rax
movq 144(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 136(%rdi)
sbbq %rax, %rdx
movq 152(%rsi), %rcx
movq 152(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 144(%rdi)
sbbq %rcx, %rax
movq 160(%rsi), %rdx
movq 160(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 152(%rdi)
sbbq %rdx, %rcx
movq 168(%rsi), %rax
movq 168(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 160(%rdi)
sbbq %rax, %rdx
movq 176(%rsi), %rcx
movq 176(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 168(%rdi)
sbbq %rcx, %rax
movq 184(%rsi), %rdx
movq 184(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 176(%rdi)
sbbq %rdx, %rcx
movq 192(%rsi), %rax
movq 192(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 184(%rdi)
sbbq %rax, %rdx
movq 200(%rsi), %rcx
movq 200(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 192(%rdi)
sbbq %rcx, %rax
movq 208(%rsi), %rdx
movq 208(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 200(%rdi)
sbbq %rdx, %rcx
movq 216(%rsi), %rax
movq 216(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 208(%rdi)
sbbq %rax, %rdx
movq 224(%rsi), %rcx
movq 224(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 216(%rdi)
sbbq %rcx, %rax
movq 232(%rsi), %rdx
movq 232(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 224(%rdi)
sbbq %rdx, %rcx
movq 240(%rsi), %rax
movq 240(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 232(%rdi)
sbbq %rax, %rdx
movq 248(%rsi), %rcx
movq 248(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 240(%rdi)
sbbq %rcx, %rax
movq 256(%rsi), %rdx
movq 256(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 248(%rdi)
sbbq %rdx, %rcx
movq 264(%rsi), %rax
movq 264(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 256(%rdi)
sbbq %rax, %rdx
movq 272(%rsi), %rcx
movq 272(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 264(%rdi)
sbbq %rcx, %rax
movq 280(%rsi), %rdx
movq 280(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 272(%rdi)
sbbq %rdx, %rcx
movq 288(%rsi), %rax
movq 288(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 280(%rdi)
sbbq %rax, %rdx
movq 296(%rsi), %rcx
movq 296(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 288(%rdi)
sbbq %rcx, %rax
movq 304(%rsi), %rdx
movq 304(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 296(%rdi)
sbbq %rdx, %rcx
movq 312(%rsi), %rax
movq 312(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 304(%rdi)
sbbq %rax, %rdx
movq 320(%rsi), %rcx
movq 320(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 312(%rdi)
sbbq %rcx, %rax
movq 328(%rsi), %rdx
movq 328(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 320(%rdi)
sbbq %rdx, %rcx
movq 336(%rsi), %rax
movq 336(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 328(%rdi)
sbbq %rax, %rdx
movq 344(%rsi), %rcx
movq 344(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 336(%rdi)
sbbq %rcx, %rax
movq 352(%rsi), %rdx
movq 352(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 344(%rdi)
sbbq %rdx, %rcx
movq 360(%rsi), %rax
movq 360(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 352(%rdi)
sbbq %rax, %rdx
movq 368(%rsi), %rcx
movq 368(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 360(%rdi)
sbbq %rcx, %rax
movq 376(%rsi), %rdx
movq 376(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 368(%rdi)
sbbq %rdx, %rcx
movq %rcx, 376(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_3072_mont_reduce_avx2_48,.-sp_3072_mont_reduce_avx2_48
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
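/* Copy one entry out of a table of 16 pointers to 48-digit numbers in
 * constant time, AVX2 variant: the same masked gather as the SSE2
 * version above, but sixteen digits of every entry per pass.
 *
 * r      A single precision number to copy the selected entry into.
 * table  Table of pointers to single precision numbers.
 * idx    Index of the entry to get.
 */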
#ifndef __APPLE__
.text
.globl sp_3072_get_from_table_avx2_48
.type sp_3072_get_from_table_avx2_48,@function
.align 16
sp_3072_get_from_table_avx2_48:
#else
.section __TEXT,__text
.globl _sp_3072_get_from_table_avx2_48
.p2align 4
_sp_3072_get_from_table_avx2_48:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
vpxor %ymm13, %ymm13, %ymm13
vpermd %ymm10, %ymm13, %ymm10
vpermd %ymm11, %ymm13, %ymm11
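# vpermd with a zero index vector (ymm13) broadcasts idx into ymm10 and
# the constant 1 into ymm11; ymm13 is then reused as the running entry
# counter for the branch-free vpcmpeqd mask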
# START: 0-15
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 0-15
# START: 16-31
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 16-31
# START: 32-47
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
# END: 32-47
repz retq
#ifndef __APPLE__
.size sp_3072_get_from_table_avx2_48,.-sp_3072_get_from_table_avx2_48
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
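/* The unrolled blocks above implement a cache-attack resistant table
 * lookup: every table entry is read unconditionally, and a compare-derived
 * all-ones/all-zeros mask selects only the requested entry into the
 * accumulator registers. A minimal C sketch of the same idea (names
 * hypothetical; scalar instead of AVX2, and the mask construction is only
 * constant-time if the compiler does not turn it back into a branch):
 *
 *     void get_from_table(uint64_t* r, const uint64_t** table,
 *                         int entries, int words, int idx)
 *     {
 *         for (int j = 0; j < words; j++)
 *             r[j] = 0;
 *         for (int i = 0; i < entries; i++) {
 *             uint64_t mask = (uint64_t)0 - (uint64_t)(i == idx);
 *             for (int j = 0; j < words; j++)
 *                 r[j] |= table[i][j] & mask;    // keep only entry idx
 *         }
 *     }
 *
 * The memory access pattern is identical for every idx, so the chosen
 * index cannot be recovered from data-cache timing.
 */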
/* Conditionally add a and b using the mask m.
 * m is -1 to add b and 0 to add nothing (r = a when m is 0).
*
* r A single precision number representing conditional add result.
* a A single precision number to add with.
* b A single precision number to add.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_3072_cond_add_24
.type sp_3072_cond_add_24,@function
.align 16
sp_3072_cond_add_24:
#else
.section __TEXT,__text
.globl _sp_3072_cond_add_24
.p2align 4
_sp_3072_cond_add_24:
#endif /* __APPLE__ */
subq $0xc0, %rsp
movq $0x00, %rax
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
addq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
adcq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
adcq $0x00, %rax
addq $0xc0, %rsp
repz retq
#ifndef __APPLE__
.size sp_3072_cond_add_24,.-sp_3072_cond_add_24
#endif /* __APPLE__ */
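/* sp_3072_cond_add_24 first ANDs every word of b with the mask into a
 * stack buffer, then runs one add-with-carry chain and returns the final
 * carry in %rax. A rough C equivalent (a sketch using the compiler
 * __int128 extension, not the generated source):
 *
 *     uint64_t cond_add(uint64_t* r, const uint64_t* a,
 *                       const uint64_t* b, uint64_t m, int n)
 *     {
 *         unsigned __int128 t = 0;
 *         for (int i = 0; i < n; i++) {
 *             t += (unsigned __int128)a[i] + (b[i] & m);
 *             r[i] = (uint64_t)t;
 *             t >>= 64;              // carry into the next word
 *         }
 *         return (uint64_t)t;        // final carry (0 or 1)
 *     }
 */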
#ifdef HAVE_INTEL_AVX2
/* Conditionally add a and b using the mask m.
 * m is -1 to add b and 0 to add nothing (r = a when m is 0).
*
* r A single precision number representing conditional add result.
* a A single precision number to add with.
* b A single precision number to add.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_3072_cond_add_avx2_24
.type sp_3072_cond_add_avx2_24,@function
.align 16
sp_3072_cond_add_avx2_24:
#else
.section __TEXT,__text
.globl _sp_3072_cond_add_avx2_24
.p2align 4
_sp_3072_cond_add_avx2_24:
#endif /* __APPLE__ */
movq $0x00, %rax
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
addq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
adcq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
adcq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
adcq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
adcq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
adcq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
adcq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
adcq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
adcq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
adcq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
adcq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
adcq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
adcq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
adcq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
adcq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
adcq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
adcq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
adcq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
adcq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
adcq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
adcq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
adcq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
adcq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
adcq %r8, %r10
movq %r10, 184(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_3072_cond_add_avx2_24,.-sp_3072_cond_add_avx2_24
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
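/* The BMI2 variant above replaces the explicit andq with pextq: because
 * the mask is always 0 or all ones, _pext_u64(x, m) extracts either no
 * bits or every bit in place, i.e. exactly x & m, letting the adc chain
 * consume the masked word directly. Illustrated with the intrinsic:
 *
 *     #include <immintrin.h>
 *     // m is 0 or ~0ULL here, so this equals (x & m)
 *     uint64_t masked = _pext_u64(x, m);
 */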
/* Shift number left by n bits. (r = a << n)
*
* r Result of left shift by n.
* a Number to shift.
 * n Amount to shift.
*/
#ifndef __APPLE__
.text
.globl sp_3072_lshift_48
.type sp_3072_lshift_48,@function
.align 16
sp_3072_lshift_48:
#else
.section __TEXT,__text
.globl _sp_3072_lshift_48
.p2align 4
_sp_3072_lshift_48:
#endif /* __APPLE__ */
movb %dl, %cl
movq $0x00, %r10
movq 344(%rsi), %r11
movq 352(%rsi), %rdx
movq 360(%rsi), %rax
movq 368(%rsi), %r8
movq 376(%rsi), %r9
shldq %cl, %r9, %r10
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 352(%rdi)
movq %rax, 360(%rdi)
movq %r8, 368(%rdi)
movq %r9, 376(%rdi)
movq %r10, 384(%rdi)
movq 312(%rsi), %r9
movq 320(%rsi), %rdx
movq 328(%rsi), %rax
movq 336(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 320(%rdi)
movq %rax, 328(%rdi)
movq %r8, 336(%rdi)
movq %r11, 344(%rdi)
movq 280(%rsi), %r11
movq 288(%rsi), %rdx
movq 296(%rsi), %rax
movq 304(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 288(%rdi)
movq %rax, 296(%rdi)
movq %r8, 304(%rdi)
movq %r9, 312(%rdi)
movq 248(%rsi), %r9
movq 256(%rsi), %rdx
movq 264(%rsi), %rax
movq 272(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 256(%rdi)
movq %rax, 264(%rdi)
movq %r8, 272(%rdi)
movq %r11, 280(%rdi)
movq 216(%rsi), %r11
movq 224(%rsi), %rdx
movq 232(%rsi), %rax
movq 240(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 224(%rdi)
movq %rax, 232(%rdi)
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
movq 184(%rsi), %r9
movq 192(%rsi), %rdx
movq 200(%rsi), %rax
movq 208(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 192(%rdi)
movq %rax, 200(%rdi)
movq %r8, 208(%rdi)
movq %r11, 216(%rdi)
movq 152(%rsi), %r11
movq 160(%rsi), %rdx
movq 168(%rsi), %rax
movq 176(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 160(%rdi)
movq %rax, 168(%rdi)
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
movq 120(%rsi), %r9
movq 128(%rsi), %rdx
movq 136(%rsi), %rax
movq 144(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 128(%rdi)
movq %rax, 136(%rdi)
movq %r8, 144(%rdi)
movq %r11, 152(%rdi)
movq 88(%rsi), %r11
movq 96(%rsi), %rdx
movq 104(%rsi), %rax
movq 112(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 96(%rdi)
movq %rax, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
movq 56(%rsi), %r9
movq 64(%rsi), %rdx
movq 72(%rsi), %rax
movq 80(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 64(%rdi)
movq %rax, 72(%rdi)
movq %r8, 80(%rdi)
movq %r11, 88(%rdi)
movq 24(%rsi), %r11
movq 32(%rsi), %rdx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 32(%rdi)
movq %rax, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shlq %cl, %rdx
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %r8, 16(%rdi)
movq %r11, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_3072_lshift_48,.-sp_3072_lshift_48
#endif /* __APPLE__ */
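/* sp_3072_lshift_48 works from the most significant words downwards,
 * using shld to pull the top bits of the next lower word into each result
 * word; the bits shifted out of the top land in r[48]. A C sketch for
 * 0 < n < 64 (n == 0 would make the 64 - n shift undefined in C, so it
 * is excluded here):
 *
 *     void lshift(uint64_t* r, const uint64_t* a, int words, unsigned n)
 *     {
 *         r[words] = a[words - 1] >> (64 - n);   // shifted-out bits
 *         for (int i = words - 1; i > 0; i--)
 *             r[i] = (a[i] << n) | (a[i - 1] >> (64 - n));
 *         r[0] = a[0] << n;
 *     }
 */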
#endif /* !WOLFSSL_SP_NO_3072 */
#endif /* !WOLFSSL_SP_NO_3072 */
#ifdef WOLFSSL_SP_4096
#ifdef WOLFSSL_SP_4096
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_4096_from_bin_bswap
.type sp_4096_from_bin_bswap,@function
.align 16
sp_4096_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_4096_from_bin_bswap
.p2align 4
_sp_4096_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x200, %r10
xorq %r11, %r11
jmp L_4096_from_bin_bswap_64_end
L_4096_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_4096_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_4096_from_bin_bswap_64_start
jmp L_4096_from_bin_bswap_8_end
L_4096_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_4096_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_4096_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_4096_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_4096_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_4096_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_4096_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_4096_from_bin_bswap_zero_end
L_4096_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_4096_from_bin_bswap_zero_start
L_4096_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_4096_from_bin_bswap,.-sp_4096_from_bin_bswap
#endif /* __APPLE__ */
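/* The routine reads the big-endian input from its end, byte-swapping
 * eight bytes at a time into little-endian words, handles a trailing
 * partial word byte by byte, and zero-fills the rest of the fixed
 * 512-byte result. A simplified C version of the same conversion:
 *
 *     void from_bin(uint64_t* r, int max_words,
 *                   const unsigned char* a, int n)
 *     {
 *         int i, j = 0;
 *         for (i = n - 1; i >= 7; i -= 8, j++)   // full 8-byte words
 *             r[j] = ((uint64_t)a[i]      )
 *                  | ((uint64_t)a[i-1] <<  8) | ((uint64_t)a[i-2] << 16)
 *                  | ((uint64_t)a[i-3] << 24) | ((uint64_t)a[i-4] << 32)
 *                  | ((uint64_t)a[i-5] << 40) | ((uint64_t)a[i-6] << 48)
 *                  | ((uint64_t)a[i-7] << 56);
 *         if (i >= 0) {                          // trailing partial word
 *             uint64_t t = 0;
 *             for (int k = 0; k <= i; k++)
 *                 t = (t << 8) | a[k];
 *             r[j++] = t;
 *         }
 *         while (j < max_words)                  // zero the remainder
 *             r[j++] = 0;
 *     }
 */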
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
 * Uses the movbe instruction, which is optional.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_4096_from_bin_movbe
.type sp_4096_from_bin_movbe,@function
.align 16
sp_4096_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_4096_from_bin_movbe
.p2align 4
_sp_4096_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x200, %r10
jmp L_4096_from_bin_movbe_64_end
L_4096_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_4096_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_4096_from_bin_movbe_64_start
jmp L_4096_from_bin_movbe_8_end
L_4096_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_4096_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_4096_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_4096_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_4096_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_4096_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_4096_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_4096_from_bin_movbe_zero_end
L_4096_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_4096_from_bin_movbe_zero_start
L_4096_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_4096_from_bin_movbe,.-sp_4096_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
 * Fixed number of bytes written: 512.
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_4096_to_bin_bswap_64
.type sp_4096_to_bin_bswap_64,@function
.align 16
sp_4096_to_bin_bswap_64:
#else
.section __TEXT,__text
.globl _sp_4096_to_bin_bswap_64
.p2align 4
_sp_4096_to_bin_bswap_64:
#endif /* __APPLE__ */
movq 504(%rdi), %rdx
movq 496(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movq 488(%rdi), %rdx
movq 480(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movq 472(%rdi), %rdx
movq 464(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movq 456(%rdi), %rdx
movq 448(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movq 440(%rdi), %rdx
movq 432(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movq 424(%rdi), %rdx
movq 416(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movq 408(%rdi), %rdx
movq 400(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movq 392(%rdi), %rdx
movq 384(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movq 376(%rdi), %rdx
movq 368(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movq 360(%rdi), %rdx
movq 352(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movq 344(%rdi), %rdx
movq 336(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movq 328(%rdi), %rdx
movq 320(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movq 312(%rdi), %rdx
movq 304(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movq 296(%rdi), %rdx
movq 288(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movq 280(%rdi), %rdx
movq 272(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movq 264(%rdi), %rdx
movq 256(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
movq 248(%rdi), %rdx
movq 240(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 256(%rsi)
movq %rax, 264(%rsi)
movq 232(%rdi), %rdx
movq 224(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 272(%rsi)
movq %rax, 280(%rsi)
movq 216(%rdi), %rdx
movq 208(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 288(%rsi)
movq %rax, 296(%rsi)
movq 200(%rdi), %rdx
movq 192(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 304(%rsi)
movq %rax, 312(%rsi)
movq 184(%rdi), %rdx
movq 176(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 320(%rsi)
movq %rax, 328(%rsi)
movq 168(%rdi), %rdx
movq 160(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 336(%rsi)
movq %rax, 344(%rsi)
movq 152(%rdi), %rdx
movq 144(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 352(%rsi)
movq %rax, 360(%rsi)
movq 136(%rdi), %rdx
movq 128(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 368(%rsi)
movq %rax, 376(%rsi)
movq 120(%rdi), %rdx
movq 112(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 384(%rsi)
movq %rax, 392(%rsi)
movq 104(%rdi), %rdx
movq 96(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 400(%rsi)
movq %rax, 408(%rsi)
movq 88(%rdi), %rdx
movq 80(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 416(%rsi)
movq %rax, 424(%rsi)
movq 72(%rdi), %rdx
movq 64(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 432(%rsi)
movq %rax, 440(%rsi)
movq 56(%rdi), %rdx
movq 48(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 448(%rsi)
movq %rax, 456(%rsi)
movq 40(%rdi), %rdx
movq 32(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 464(%rsi)
movq %rax, 472(%rsi)
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 480(%rsi)
movq %rax, 488(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 496(%rsi)
movq %rax, 504(%rsi)
repz retq
#ifndef __APPLE__
.size sp_4096_to_bin_bswap_64,.-sp_4096_to_bin_bswap_64
#endif /* __APPLE__ */
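/* The store order is the mirror of sp_4096_from_bin_*: the most
 * significant word r[63] is byte-swapped into the first eight output
 * bytes, down to r[0] in the last eight. In C terms (a sketch):
 *
 *     void to_bin_64(const uint64_t* r, unsigned char* a)
 *     {
 *         for (int i = 0; i < 64; i++) {
 *             uint64_t w = r[63 - i];            // highest word first
 *             for (int k = 0; k < 8; k++)
 *                 a[i * 8 + k] = (unsigned char)(w >> (56 - 8 * k));
 *         }
 *     }
 */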
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
 * Fixed number of bytes written: 512.
 * Uses the movbe instruction, which is optional.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_4096_to_bin_movbe_64
.type sp_4096_to_bin_movbe_64,@function
.align 16
sp_4096_to_bin_movbe_64:
#else
.section __TEXT,__text
.globl _sp_4096_to_bin_movbe_64
.p2align 4
_sp_4096_to_bin_movbe_64:
#endif /* __APPLE__ */
movbeq 504(%rdi), %rdx
movbeq 496(%rdi), %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movbeq 488(%rdi), %rdx
movbeq 480(%rdi), %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movbeq 472(%rdi), %rdx
movbeq 464(%rdi), %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
movbeq 456(%rdi), %rdx
movbeq 448(%rdi), %rax
movq %rdx, 48(%rsi)
movq %rax, 56(%rsi)
movbeq 440(%rdi), %rdx
movbeq 432(%rdi), %rax
movq %rdx, 64(%rsi)
movq %rax, 72(%rsi)
movbeq 424(%rdi), %rdx
movbeq 416(%rdi), %rax
movq %rdx, 80(%rsi)
movq %rax, 88(%rsi)
movbeq 408(%rdi), %rdx
movbeq 400(%rdi), %rax
movq %rdx, 96(%rsi)
movq %rax, 104(%rsi)
movbeq 392(%rdi), %rdx
movbeq 384(%rdi), %rax
movq %rdx, 112(%rsi)
movq %rax, 120(%rsi)
movbeq 376(%rdi), %rdx
movbeq 368(%rdi), %rax
movq %rdx, 128(%rsi)
movq %rax, 136(%rsi)
movbeq 360(%rdi), %rdx
movbeq 352(%rdi), %rax
movq %rdx, 144(%rsi)
movq %rax, 152(%rsi)
movbeq 344(%rdi), %rdx
movbeq 336(%rdi), %rax
movq %rdx, 160(%rsi)
movq %rax, 168(%rsi)
movbeq 328(%rdi), %rdx
movbeq 320(%rdi), %rax
movq %rdx, 176(%rsi)
movq %rax, 184(%rsi)
movbeq 312(%rdi), %rdx
movbeq 304(%rdi), %rax
movq %rdx, 192(%rsi)
movq %rax, 200(%rsi)
movbeq 296(%rdi), %rdx
movbeq 288(%rdi), %rax
movq %rdx, 208(%rsi)
movq %rax, 216(%rsi)
movbeq 280(%rdi), %rdx
movbeq 272(%rdi), %rax
movq %rdx, 224(%rsi)
movq %rax, 232(%rsi)
movbeq 264(%rdi), %rdx
movbeq 256(%rdi), %rax
movq %rdx, 240(%rsi)
movq %rax, 248(%rsi)
movbeq 248(%rdi), %rdx
movbeq 240(%rdi), %rax
movq %rdx, 256(%rsi)
movq %rax, 264(%rsi)
movbeq 232(%rdi), %rdx
movbeq 224(%rdi), %rax
movq %rdx, 272(%rsi)
movq %rax, 280(%rsi)
movbeq 216(%rdi), %rdx
movbeq 208(%rdi), %rax
movq %rdx, 288(%rsi)
movq %rax, 296(%rsi)
movbeq 200(%rdi), %rdx
movbeq 192(%rdi), %rax
movq %rdx, 304(%rsi)
movq %rax, 312(%rsi)
movbeq 184(%rdi), %rdx
movbeq 176(%rdi), %rax
movq %rdx, 320(%rsi)
movq %rax, 328(%rsi)
movbeq 168(%rdi), %rdx
movbeq 160(%rdi), %rax
movq %rdx, 336(%rsi)
movq %rax, 344(%rsi)
movbeq 152(%rdi), %rdx
movbeq 144(%rdi), %rax
movq %rdx, 352(%rsi)
movq %rax, 360(%rsi)
movbeq 136(%rdi), %rdx
movbeq 128(%rdi), %rax
movq %rdx, 368(%rsi)
movq %rax, 376(%rsi)
movbeq 120(%rdi), %rdx
movbeq 112(%rdi), %rax
movq %rdx, 384(%rsi)
movq %rax, 392(%rsi)
movbeq 104(%rdi), %rdx
movbeq 96(%rdi), %rax
movq %rdx, 400(%rsi)
movq %rax, 408(%rsi)
movbeq 88(%rdi), %rdx
movbeq 80(%rdi), %rax
movq %rdx, 416(%rsi)
movq %rax, 424(%rsi)
movbeq 72(%rdi), %rdx
movbeq 64(%rdi), %rax
movq %rdx, 432(%rsi)
movq %rax, 440(%rsi)
movbeq 56(%rdi), %rdx
movbeq 48(%rdi), %rax
movq %rdx, 448(%rsi)
movq %rax, 456(%rsi)
movbeq 40(%rdi), %rdx
movbeq 32(%rdi), %rax
movq %rdx, 464(%rsi)
movq %rax, 472(%rsi)
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, 480(%rsi)
movq %rax, 488(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 496(%rsi)
movq %rax, 504(%rsi)
repz retq
#ifndef __APPLE__
.size sp_4096_to_bin_movbe_64,.-sp_4096_to_bin_movbe_64
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Subtract b from a into a. (a -= b)
*
* a A single precision integer and result.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_4096_sub_in_place_64
.type sp_4096_sub_in_place_64,@function
.align 16
sp_4096_sub_in_place_64:
#else
.section __TEXT,__text
.globl _sp_4096_sub_in_place_64
.p2align 4
_sp_4096_sub_in_place_64:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq 128(%rdi), %rdx
movq %rcx, 120(%rdi)
sbbq 128(%rsi), %rdx
movq 136(%rdi), %rcx
movq %rdx, 128(%rdi)
sbbq 136(%rsi), %rcx
movq 144(%rdi), %rdx
movq %rcx, 136(%rdi)
sbbq 144(%rsi), %rdx
movq 152(%rdi), %rcx
movq %rdx, 144(%rdi)
sbbq 152(%rsi), %rcx
movq 160(%rdi), %rdx
movq %rcx, 152(%rdi)
sbbq 160(%rsi), %rdx
movq 168(%rdi), %rcx
movq %rdx, 160(%rdi)
sbbq 168(%rsi), %rcx
movq 176(%rdi), %rdx
movq %rcx, 168(%rdi)
sbbq 176(%rsi), %rdx
movq 184(%rdi), %rcx
movq %rdx, 176(%rdi)
sbbq 184(%rsi), %rcx
movq 192(%rdi), %rdx
movq %rcx, 184(%rdi)
sbbq 192(%rsi), %rdx
movq 200(%rdi), %rcx
movq %rdx, 192(%rdi)
sbbq 200(%rsi), %rcx
movq 208(%rdi), %rdx
movq %rcx, 200(%rdi)
sbbq 208(%rsi), %rdx
movq 216(%rdi), %rcx
movq %rdx, 208(%rdi)
sbbq 216(%rsi), %rcx
movq 224(%rdi), %rdx
movq %rcx, 216(%rdi)
sbbq 224(%rsi), %rdx
movq 232(%rdi), %rcx
movq %rdx, 224(%rdi)
sbbq 232(%rsi), %rcx
movq 240(%rdi), %rdx
movq %rcx, 232(%rdi)
sbbq 240(%rsi), %rdx
movq 248(%rdi), %rcx
movq %rdx, 240(%rdi)
sbbq 248(%rsi), %rcx
movq 256(%rdi), %rdx
movq %rcx, 248(%rdi)
sbbq 256(%rsi), %rdx
movq 264(%rdi), %rcx
movq %rdx, 256(%rdi)
sbbq 264(%rsi), %rcx
movq 272(%rdi), %rdx
movq %rcx, 264(%rdi)
sbbq 272(%rsi), %rdx
movq 280(%rdi), %rcx
movq %rdx, 272(%rdi)
sbbq 280(%rsi), %rcx
movq 288(%rdi), %rdx
movq %rcx, 280(%rdi)
sbbq 288(%rsi), %rdx
movq 296(%rdi), %rcx
movq %rdx, 288(%rdi)
sbbq 296(%rsi), %rcx
movq 304(%rdi), %rdx
movq %rcx, 296(%rdi)
sbbq 304(%rsi), %rdx
movq 312(%rdi), %rcx
movq %rdx, 304(%rdi)
sbbq 312(%rsi), %rcx
movq 320(%rdi), %rdx
movq %rcx, 312(%rdi)
sbbq 320(%rsi), %rdx
movq 328(%rdi), %rcx
movq %rdx, 320(%rdi)
sbbq 328(%rsi), %rcx
movq 336(%rdi), %rdx
movq %rcx, 328(%rdi)
sbbq 336(%rsi), %rdx
movq 344(%rdi), %rcx
movq %rdx, 336(%rdi)
sbbq 344(%rsi), %rcx
movq 352(%rdi), %rdx
movq %rcx, 344(%rdi)
sbbq 352(%rsi), %rdx
movq 360(%rdi), %rcx
movq %rdx, 352(%rdi)
sbbq 360(%rsi), %rcx
movq 368(%rdi), %rdx
movq %rcx, 360(%rdi)
sbbq 368(%rsi), %rdx
movq 376(%rdi), %rcx
movq %rdx, 368(%rdi)
sbbq 376(%rsi), %rcx
movq 384(%rdi), %rdx
movq %rcx, 376(%rdi)
sbbq 384(%rsi), %rdx
movq 392(%rdi), %rcx
movq %rdx, 384(%rdi)
sbbq 392(%rsi), %rcx
movq 400(%rdi), %rdx
movq %rcx, 392(%rdi)
sbbq 400(%rsi), %rdx
movq 408(%rdi), %rcx
movq %rdx, 400(%rdi)
sbbq 408(%rsi), %rcx
movq 416(%rdi), %rdx
movq %rcx, 408(%rdi)
sbbq 416(%rsi), %rdx
movq 424(%rdi), %rcx
movq %rdx, 416(%rdi)
sbbq 424(%rsi), %rcx
movq 432(%rdi), %rdx
movq %rcx, 424(%rdi)
sbbq 432(%rsi), %rdx
movq 440(%rdi), %rcx
movq %rdx, 432(%rdi)
sbbq 440(%rsi), %rcx
movq 448(%rdi), %rdx
movq %rcx, 440(%rdi)
sbbq 448(%rsi), %rdx
movq 456(%rdi), %rcx
movq %rdx, 448(%rdi)
sbbq 456(%rsi), %rcx
movq 464(%rdi), %rdx
movq %rcx, 456(%rdi)
sbbq 464(%rsi), %rdx
movq 472(%rdi), %rcx
movq %rdx, 464(%rdi)
sbbq 472(%rsi), %rcx
movq 480(%rdi), %rdx
movq %rcx, 472(%rdi)
sbbq 480(%rsi), %rdx
movq 488(%rdi), %rcx
movq %rdx, 480(%rdi)
sbbq 488(%rsi), %rcx
movq 496(%rdi), %rdx
movq %rcx, 488(%rdi)
sbbq 496(%rsi), %rdx
movq 504(%rdi), %rcx
movq %rdx, 496(%rdi)
sbbq 504(%rsi), %rcx
movq %rcx, 504(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_sub_in_place_64,.-sp_4096_sub_in_place_64
#endif /* __APPLE__ */
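/* The sbb chain subtracts b from a in place; the closing
 * "sbbq %rax, %rax" converts the final borrow into 0 or -1 so callers
 * can use the return value as a mask. Roughly, in C (a sketch using the
 * compiler __int128 extension):
 *
 *     uint64_t sub_in_place(uint64_t* a, const uint64_t* b, int n)
 *     {
 *         unsigned __int128 t = 0;               // borrow tracker
 *         for (int i = 0; i < n; i++) {
 *             t = (unsigned __int128)a[i] - b[i] - (uint64_t)(t >> 127);
 *             a[i] = (uint64_t)t;
 *         }
 *         return (uint64_t)0 - (uint64_t)(t >> 127);  // 0 or all ones
 *     }
 */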
/* Add b to a into r. (r = a + b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_4096_add_64
.type sp_4096_add_64,@function
.align 16
sp_4096_add_64:
#else
.section __TEXT,__text
.globl _sp_4096_add_64
.p2align 4
_sp_4096_add_64:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
adcq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
adcq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
adcq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
adcq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
adcq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
adcq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
adcq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
adcq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
adcq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
adcq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
adcq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
adcq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
adcq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
adcq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
adcq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
adcq 248(%rdx), %r8
movq 256(%rsi), %rcx
movq %r8, 248(%rdi)
adcq 256(%rdx), %rcx
movq 264(%rsi), %r8
movq %rcx, 256(%rdi)
adcq 264(%rdx), %r8
movq 272(%rsi), %rcx
movq %r8, 264(%rdi)
adcq 272(%rdx), %rcx
movq 280(%rsi), %r8
movq %rcx, 272(%rdi)
adcq 280(%rdx), %r8
movq 288(%rsi), %rcx
movq %r8, 280(%rdi)
adcq 288(%rdx), %rcx
movq 296(%rsi), %r8
movq %rcx, 288(%rdi)
adcq 296(%rdx), %r8
movq 304(%rsi), %rcx
movq %r8, 296(%rdi)
adcq 304(%rdx), %rcx
movq 312(%rsi), %r8
movq %rcx, 304(%rdi)
adcq 312(%rdx), %r8
movq 320(%rsi), %rcx
movq %r8, 312(%rdi)
adcq 320(%rdx), %rcx
movq 328(%rsi), %r8
movq %rcx, 320(%rdi)
adcq 328(%rdx), %r8
movq 336(%rsi), %rcx
movq %r8, 328(%rdi)
adcq 336(%rdx), %rcx
movq 344(%rsi), %r8
movq %rcx, 336(%rdi)
adcq 344(%rdx), %r8
movq 352(%rsi), %rcx
movq %r8, 344(%rdi)
adcq 352(%rdx), %rcx
movq 360(%rsi), %r8
movq %rcx, 352(%rdi)
adcq 360(%rdx), %r8
movq 368(%rsi), %rcx
movq %r8, 360(%rdi)
adcq 368(%rdx), %rcx
movq 376(%rsi), %r8
movq %rcx, 368(%rdi)
adcq 376(%rdx), %r8
movq 384(%rsi), %rcx
movq %r8, 376(%rdi)
adcq 384(%rdx), %rcx
movq 392(%rsi), %r8
movq %rcx, 384(%rdi)
adcq 392(%rdx), %r8
movq 400(%rsi), %rcx
movq %r8, 392(%rdi)
adcq 400(%rdx), %rcx
movq 408(%rsi), %r8
movq %rcx, 400(%rdi)
adcq 408(%rdx), %r8
movq 416(%rsi), %rcx
movq %r8, 408(%rdi)
adcq 416(%rdx), %rcx
movq 424(%rsi), %r8
movq %rcx, 416(%rdi)
adcq 424(%rdx), %r8
movq 432(%rsi), %rcx
movq %r8, 424(%rdi)
adcq 432(%rdx), %rcx
movq 440(%rsi), %r8
movq %rcx, 432(%rdi)
adcq 440(%rdx), %r8
movq 448(%rsi), %rcx
movq %r8, 440(%rdi)
adcq 448(%rdx), %rcx
movq 456(%rsi), %r8
movq %rcx, 448(%rdi)
adcq 456(%rdx), %r8
movq 464(%rsi), %rcx
movq %r8, 456(%rdi)
adcq 464(%rdx), %rcx
movq 472(%rsi), %r8
movq %rcx, 464(%rdi)
adcq 472(%rdx), %r8
movq 480(%rsi), %rcx
movq %r8, 472(%rdi)
adcq 480(%rdx), %rcx
movq 488(%rsi), %r8
movq %rcx, 480(%rdi)
adcq 488(%rdx), %r8
movq 496(%rsi), %rcx
movq %r8, 488(%rdi)
adcq 496(%rdx), %rcx
movq 504(%rsi), %r8
movq %rcx, 496(%rdi)
adcq 504(%rdx), %r8
movq %r8, 504(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_add_64,.-sp_4096_add_64
#endif /* __APPLE__ */
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
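/* This is one level of Karatsuba on top of sp_2048_mul_32: with
 * a = a1*B + a0 and b = b1*B + b0 (B = 2^2048), three half-size products
 *
 *     z0 = a0*b0, z2 = a1*b1, z1 = (a0 + a1)*(b0 + b1) - z0 - z2
 *
 * are combined as r = z2*B^2 + z1*B + z0. The carries of the two
 * half-size additions are saved at 1560/1568(%rsp) and folded back in
 * with masked adds after the multiplies. In outline, using hypothetical
 * word-array helpers:
 *
 *     mul_32(z1, a0 + a1, b0 + b1);   // sums formed first, carries kept
 *     mul_32(z2, a1, b1);
 *     mul_32(z0, a0, b0);
 *     sub(z1, z0); sub(z1, z2);       // z1 = a0*b1 + a1*b0 (plus fixups)
 *     r = z0 + (z1 << 2048) + (z2 << 4096);
 */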
#ifndef __APPLE__
.text
.globl sp_4096_mul_64
.type sp_4096_mul_64,@function
.align 16
sp_4096_mul_64:
#else
.section __TEXT,__text
.globl _sp_4096_mul_64
.p2align 4
_sp_4096_mul_64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x628, %rsp
movq %rdi, 1536(%rsp)
movq %rsi, 1544(%rsp)
movq %rdx, 1552(%rsp)
leaq 1024(%rsp), %r10
leaq 256(%rsi), %r12
# Add
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq 128(%rsi), %rcx
movq %rax, 120(%r10)
adcq 128(%r12), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%r10)
adcq 136(%r12), %r8
movq 144(%rsi), %rax
movq %r8, 136(%r10)
adcq 144(%r12), %rax
movq 152(%rsi), %rcx
movq %rax, 144(%r10)
adcq 152(%r12), %rcx
movq 160(%rsi), %r8
movq %rcx, 152(%r10)
adcq 160(%r12), %r8
movq 168(%rsi), %rax
movq %r8, 160(%r10)
adcq 168(%r12), %rax
movq 176(%rsi), %rcx
movq %rax, 168(%r10)
adcq 176(%r12), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%r10)
adcq 184(%r12), %r8
movq 192(%rsi), %rax
movq %r8, 184(%r10)
adcq 192(%r12), %rax
movq 200(%rsi), %rcx
movq %rax, 192(%r10)
adcq 200(%r12), %rcx
movq 208(%rsi), %r8
movq %rcx, 200(%r10)
adcq 208(%r12), %r8
movq 216(%rsi), %rax
movq %r8, 208(%r10)
adcq 216(%r12), %rax
movq 224(%rsi), %rcx
movq %rax, 216(%r10)
adcq 224(%r12), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%r10)
adcq 232(%r12), %r8
movq 240(%rsi), %rax
movq %r8, 232(%r10)
adcq 240(%r12), %rax
movq 248(%rsi), %rcx
movq %rax, 240(%r10)
adcq 248(%r12), %rcx
movq %rcx, 248(%r10)
adcq $0x00, %r13
movq %r13, 1560(%rsp)
leaq 1280(%rsp), %r11
leaq 256(%rdx), %r12
# Add
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq 128(%rdx), %rcx
movq %rax, 120(%r11)
adcq 128(%r12), %rcx
movq 136(%rdx), %r8
movq %rcx, 128(%r11)
adcq 136(%r12), %r8
movq 144(%rdx), %rax
movq %r8, 136(%r11)
adcq 144(%r12), %rax
movq 152(%rdx), %rcx
movq %rax, 144(%r11)
adcq 152(%r12), %rcx
movq 160(%rdx), %r8
movq %rcx, 152(%r11)
adcq 160(%r12), %r8
movq 168(%rdx), %rax
movq %r8, 160(%r11)
adcq 168(%r12), %rax
movq 176(%rdx), %rcx
movq %rax, 168(%r11)
adcq 176(%r12), %rcx
movq 184(%rdx), %r8
movq %rcx, 176(%r11)
adcq 184(%r12), %r8
movq 192(%rdx), %rax
movq %r8, 184(%r11)
adcq 192(%r12), %rax
movq 200(%rdx), %rcx
movq %rax, 192(%r11)
adcq 200(%r12), %rcx
movq 208(%rdx), %r8
movq %rcx, 200(%r11)
adcq 208(%r12), %r8
movq 216(%rdx), %rax
movq %r8, 208(%r11)
adcq 216(%r12), %rax
movq 224(%rdx), %rcx
movq %rax, 216(%r11)
adcq 224(%r12), %rcx
movq 232(%rdx), %r8
movq %rcx, 224(%r11)
adcq 232(%r12), %r8
movq 240(%rdx), %rax
movq %r8, 232(%r11)
adcq 240(%r12), %rax
movq 248(%rdx), %rcx
movq %rax, 240(%r11)
adcq 248(%r12), %rcx
movq %rcx, 248(%r11)
adcq $0x00, %r14
movq %r14, 1568(%rsp)
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_mul_32@plt
#else
callq _sp_2048_mul_32
#endif /* __APPLE__ */
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
leaq 512(%rsp), %rdi
addq $0x100, %rdx
addq $0x100, %rsi
#ifndef __APPLE__
callq sp_2048_mul_32@plt
#else
callq _sp_2048_mul_32
#endif /* __APPLE__ */
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
movq 1536(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_mul_32@plt
#else
callq _sp_2048_mul_32
#endif /* __APPLE__ */
#ifdef _WIN64
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
movq 1536(%rsp), %rdi
#endif /* _WIN64 */
movq 1560(%rsp), %r13
movq 1568(%rsp), %r14
movq 1536(%rsp), %r15
movq %r13, %r9
leaq 1024(%rsp), %r10
leaq 1280(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x200, %r15
movq (%r10), %rax
movq (%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, (%r10)
movq %rcx, (%r11)
movq 8(%r10), %rax
movq 8(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 8(%r10)
movq %rcx, 8(%r11)
movq 16(%r10), %rax
movq 16(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 16(%r10)
movq %rcx, 16(%r11)
movq 24(%r10), %rax
movq 24(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 24(%r10)
movq %rcx, 24(%r11)
movq 32(%r10), %rax
movq 32(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 32(%r10)
movq %rcx, 32(%r11)
movq 40(%r10), %rax
movq 40(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 40(%r10)
movq %rcx, 40(%r11)
movq 48(%r10), %rax
movq 48(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 48(%r10)
movq %rcx, 48(%r11)
movq 56(%r10), %rax
movq 56(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 56(%r10)
movq %rcx, 56(%r11)
movq 64(%r10), %rax
movq 64(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 64(%r10)
movq %rcx, 64(%r11)
movq 72(%r10), %rax
movq 72(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 72(%r10)
movq %rcx, 72(%r11)
movq 80(%r10), %rax
movq 80(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 80(%r10)
movq %rcx, 80(%r11)
movq 88(%r10), %rax
movq 88(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 88(%r10)
movq %rcx, 88(%r11)
movq 96(%r10), %rax
movq 96(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 96(%r10)
movq %rcx, 96(%r11)
movq 104(%r10), %rax
movq 104(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 104(%r10)
movq %rcx, 104(%r11)
movq 112(%r10), %rax
movq 112(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 112(%r10)
movq %rcx, 112(%r11)
movq 120(%r10), %rax
movq 120(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 120(%r10)
movq %rcx, 120(%r11)
movq 128(%r10), %rax
movq 128(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 128(%r10)
movq %rcx, 128(%r11)
movq 136(%r10), %rax
movq 136(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 136(%r10)
movq %rcx, 136(%r11)
movq 144(%r10), %rax
movq 144(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 144(%r10)
movq %rcx, 144(%r11)
movq 152(%r10), %rax
movq 152(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 152(%r10)
movq %rcx, 152(%r11)
movq 160(%r10), %rax
movq 160(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 160(%r10)
movq %rcx, 160(%r11)
movq 168(%r10), %rax
movq 168(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 168(%r10)
movq %rcx, 168(%r11)
movq 176(%r10), %rax
movq 176(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 176(%r10)
movq %rcx, 176(%r11)
movq 184(%r10), %rax
movq 184(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 184(%r10)
movq %rcx, 184(%r11)
movq 192(%r10), %rax
movq 192(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 192(%r10)
movq %rcx, 192(%r11)
movq 200(%r10), %rax
movq 200(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 200(%r10)
movq %rcx, 200(%r11)
movq 208(%r10), %rax
movq 208(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 208(%r10)
movq %rcx, 208(%r11)
movq 216(%r10), %rax
movq 216(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 216(%r10)
movq %rcx, 216(%r11)
movq 224(%r10), %rax
movq 224(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 224(%r10)
movq %rcx, 224(%r11)
movq 232(%r10), %rax
movq 232(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 232(%r10)
movq %rcx, 232(%r11)
movq 240(%r10), %rax
movq 240(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 240(%r10)
movq %rcx, 240(%r11)
movq 248(%r10), %rax
movq 248(%r11), %rcx
andq %r14, %rax
andq %r13, %rcx
movq %rax, 248(%r10)
movq %rcx, 248(%r11)
movq (%r10), %rax
addq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r15)
adcq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r15)
adcq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r15)
adcq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r15)
adcq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r15)
adcq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r15)
adcq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r15)
adcq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r15)
adcq 248(%r11), %rcx
movq %rcx, 248(%r15)
adcq $0x00, %r9
leaq 512(%rsp), %r11
movq %rsp, %r10
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%r11), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%r11), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%r11), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%r11), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%r11), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%r11), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%r11), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%r11), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%r11), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%r11), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%r11), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%r11), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%r11), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%r11), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%r11), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%r11), %r8
movq 384(%r10), %rax
movq %r8, 376(%r10)
sbbq 384(%r11), %rax
movq 392(%r10), %rcx
movq %rax, 384(%r10)
sbbq 392(%r11), %rcx
movq 400(%r10), %r8
movq %rcx, 392(%r10)
sbbq 400(%r11), %r8
movq 408(%r10), %rax
movq %r8, 400(%r10)
sbbq 408(%r11), %rax
movq 416(%r10), %rcx
movq %rax, 408(%r10)
sbbq 416(%r11), %rcx
movq 424(%r10), %r8
movq %rcx, 416(%r10)
sbbq 424(%r11), %r8
movq 432(%r10), %rax
movq %r8, 424(%r10)
sbbq 432(%r11), %rax
movq 440(%r10), %rcx
movq %rax, 432(%r10)
sbbq 440(%r11), %rcx
movq 448(%r10), %r8
movq %rcx, 440(%r10)
sbbq 448(%r11), %r8
movq 456(%r10), %rax
movq %r8, 448(%r10)
sbbq 456(%r11), %rax
movq 464(%r10), %rcx
movq %rax, 456(%r10)
sbbq 464(%r11), %rcx
movq 472(%r10), %r8
movq %rcx, 464(%r10)
sbbq 472(%r11), %r8
movq 480(%r10), %rax
movq %r8, 472(%r10)
sbbq 480(%r11), %rax
movq 488(%r10), %rcx
movq %rax, 480(%r10)
sbbq 488(%r11), %rcx
movq 496(%r10), %r8
movq %rcx, 488(%r10)
sbbq 496(%r11), %r8
movq 504(%r10), %rax
movq %r8, 496(%r10)
sbbq 504(%r11), %rax
movq %rax, 504(%r10)
sbbq $0x00, %r9
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%rdi), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%rdi), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%rdi), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%rdi), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%rdi), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%rdi), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%rdi), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%rdi), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%rdi), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%rdi), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%rdi), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%rdi), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%rdi), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%rdi), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%rdi), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%rdi), %r8
movq 384(%r10), %rax
movq %r8, 376(%r10)
sbbq 384(%rdi), %rax
movq 392(%r10), %rcx
movq %rax, 384(%r10)
sbbq 392(%rdi), %rcx
movq 400(%r10), %r8
movq %rcx, 392(%r10)
sbbq 400(%rdi), %r8
movq 408(%r10), %rax
movq %r8, 400(%r10)
sbbq 408(%rdi), %rax
movq 416(%r10), %rcx
movq %rax, 408(%r10)
sbbq 416(%rdi), %rcx
movq 424(%r10), %r8
movq %rcx, 416(%r10)
sbbq 424(%rdi), %r8
movq 432(%r10), %rax
movq %r8, 424(%r10)
sbbq 432(%rdi), %rax
movq 440(%r10), %rcx
movq %rax, 432(%r10)
sbbq 440(%rdi), %rcx
movq 448(%r10), %r8
movq %rcx, 440(%r10)
sbbq 448(%rdi), %r8
movq 456(%r10), %rax
movq %r8, 448(%r10)
sbbq 456(%rdi), %rax
movq 464(%r10), %rcx
movq %rax, 456(%r10)
sbbq 464(%rdi), %rcx
movq 472(%r10), %r8
movq %rcx, 464(%r10)
sbbq 472(%rdi), %r8
movq 480(%r10), %rax
movq %r8, 472(%r10)
sbbq 480(%rdi), %rax
movq 488(%r10), %rcx
movq %rax, 480(%r10)
sbbq 488(%rdi), %rcx
movq 496(%r10), %r8
movq %rcx, 488(%r10)
sbbq 496(%rdi), %r8
movq 504(%r10), %rax
movq %r8, 496(%r10)
sbbq 504(%rdi), %rax
movq %rax, 504(%r10)
sbbq $0x00, %r9
subq $0x100, %r15
# Add: fold the adjusted middle product z1 into the result, r[32..95] += z1
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r10), %r8
movq 264(%r15), %rax
movq %r8, 256(%r15)
adcq 264(%r10), %rax
movq 272(%r15), %rcx
movq %rax, 264(%r15)
adcq 272(%r10), %rcx
movq 280(%r15), %r8
movq %rcx, 272(%r15)
adcq 280(%r10), %r8
movq 288(%r15), %rax
movq %r8, 280(%r15)
adcq 288(%r10), %rax
movq 296(%r15), %rcx
movq %rax, 288(%r15)
adcq 296(%r10), %rcx
movq 304(%r15), %r8
movq %rcx, 296(%r15)
adcq 304(%r10), %r8
movq 312(%r15), %rax
movq %r8, 304(%r15)
adcq 312(%r10), %rax
movq 320(%r15), %rcx
movq %rax, 312(%r15)
adcq 320(%r10), %rcx
movq 328(%r15), %r8
movq %rcx, 320(%r15)
adcq 328(%r10), %r8
movq 336(%r15), %rax
movq %r8, 328(%r15)
adcq 336(%r10), %rax
movq 344(%r15), %rcx
movq %rax, 336(%r15)
adcq 344(%r10), %rcx
movq 352(%r15), %r8
movq %rcx, 344(%r15)
adcq 352(%r10), %r8
movq 360(%r15), %rax
movq %r8, 352(%r15)
adcq 360(%r10), %rax
movq 368(%r15), %rcx
movq %rax, 360(%r15)
adcq 368(%r10), %rcx
movq 376(%r15), %r8
movq %rcx, 368(%r15)
adcq 376(%r10), %r8
movq 384(%r15), %rax
movq %r8, 376(%r15)
adcq 384(%r10), %rax
movq 392(%r15), %rcx
movq %rax, 384(%r15)
adcq 392(%r10), %rcx
movq 400(%r15), %r8
movq %rcx, 392(%r15)
adcq 400(%r10), %r8
movq 408(%r15), %rax
movq %r8, 400(%r15)
adcq 408(%r10), %rax
movq 416(%r15), %rcx
movq %rax, 408(%r15)
adcq 416(%r10), %rcx
movq 424(%r15), %r8
movq %rcx, 416(%r15)
adcq 424(%r10), %r8
movq 432(%r15), %rax
movq %r8, 424(%r15)
adcq 432(%r10), %rax
movq 440(%r15), %rcx
movq %rax, 432(%r15)
adcq 440(%r10), %rcx
movq 448(%r15), %r8
movq %rcx, 440(%r15)
adcq 448(%r10), %r8
movq 456(%r15), %rax
movq %r8, 448(%r15)
adcq 456(%r10), %rax
movq 464(%r15), %rcx
movq %rax, 456(%r15)
adcq 464(%r10), %rcx
movq 472(%r15), %r8
movq %rcx, 464(%r15)
adcq 472(%r10), %r8
movq 480(%r15), %rax
movq %r8, 472(%r15)
adcq 480(%r10), %rax
movq 488(%r15), %rcx
movq %rax, 480(%r15)
adcq 488(%r10), %rcx
movq 496(%r15), %r8
movq %rcx, 488(%r15)
adcq 496(%r10), %r8
movq 504(%r15), %rax
movq %r8, 496(%r15)
adcq 504(%r10), %rax
movq %rax, 504(%r15)
adcq $0x00, %r9
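# Store the resulting carry word at r[96]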
movq %r9, 768(%rdi)
addq $0x100, %r15
# Add: fold the low 33 words of z2 into the result, r[64..96] += z2[0..32]
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r11), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r11), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r11), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r11), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r11), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r11), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r11), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r11), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r11), %r8
movq %r8, 256(%r15)
# Add to zero: copy z2[33..63] into r[97..127], propagating the carry
movq 264(%r11), %rax
adcq $0x00, %rax
movq 272(%r11), %rcx
movq %rax, 264(%r15)
adcq $0x00, %rcx
movq 280(%r11), %r8
movq %rcx, 272(%r15)
adcq $0x00, %r8
movq 288(%r11), %rax
movq %r8, 280(%r15)
adcq $0x00, %rax
movq 296(%r11), %rcx
movq %rax, 288(%r15)
adcq $0x00, %rcx
movq 304(%r11), %r8
movq %rcx, 296(%r15)
adcq $0x00, %r8
movq 312(%r11), %rax
movq %r8, 304(%r15)
adcq $0x00, %rax
movq 320(%r11), %rcx
movq %rax, 312(%r15)
adcq $0x00, %rcx
movq 328(%r11), %r8
movq %rcx, 320(%r15)
adcq $0x00, %r8
movq 336(%r11), %rax
movq %r8, 328(%r15)
adcq $0x00, %rax
movq 344(%r11), %rcx
movq %rax, 336(%r15)
adcq $0x00, %rcx
movq 352(%r11), %r8
movq %rcx, 344(%r15)
adcq $0x00, %r8
movq 360(%r11), %rax
movq %r8, 352(%r15)
adcq $0x00, %rax
movq 368(%r11), %rcx
movq %rax, 360(%r15)
adcq $0x00, %rcx
movq 376(%r11), %r8
movq %rcx, 368(%r15)
adcq $0x00, %r8
movq 384(%r11), %rax
movq %r8, 376(%r15)
adcq $0x00, %rax
movq 392(%r11), %rcx
movq %rax, 384(%r15)
adcq $0x00, %rcx
movq 400(%r11), %r8
movq %rcx, 392(%r15)
adcq $0x00, %r8
movq 408(%r11), %rax
movq %r8, 400(%r15)
adcq $0x00, %rax
movq 416(%r11), %rcx
movq %rax, 408(%r15)
adcq $0x00, %rcx
movq 424(%r11), %r8
movq %rcx, 416(%r15)
adcq $0x00, %r8
movq 432(%r11), %rax
movq %r8, 424(%r15)
adcq $0x00, %rax
movq 440(%r11), %rcx
movq %rax, 432(%r15)
adcq $0x00, %rcx
movq 448(%r11), %r8
movq %rcx, 440(%r15)
adcq $0x00, %r8
movq 456(%r11), %rax
movq %r8, 448(%r15)
adcq $0x00, %rax
movq 464(%r11), %rcx
movq %rax, 456(%r15)
adcq $0x00, %rcx
movq 472(%r11), %r8
movq %rcx, 464(%r15)
adcq $0x00, %r8
movq 480(%r11), %rax
movq %r8, 472(%r15)
adcq $0x00, %rax
movq 488(%r11), %rcx
movq %rax, 480(%r15)
adcq $0x00, %rcx
movq 496(%r11), %r8
movq %rcx, 488(%r15)
adcq $0x00, %r8
movq 504(%r11), %rax
movq %r8, 496(%r15)
adcq $0x00, %rax
movq %rax, 504(%r15)
addq $0x628, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_4096_mul_64,.-sp_4096_mul_64
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
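/* Karatsuba recombination (informal sketch; the operand split is inferred
 * from the three sp_2048_mul_avx2_32 calls below). With the 64-word inputs
 * split as a = a_h*2^2048 + a_l and b = b_h*2^2048 + b_l:
 *
 *   z0 = a_l * b_l
 *   z2 = a_h * b_h
 *   z1 = (a_l + a_h) * (b_l + b_h) - z0 - z2
 *   r  = z2 * 2^4096 + z1 * 2^2048 + z0
 *
 * so three 2048-bit multiplies replace one 4096-bit multiply. The pextq
 * block after the calls folds in the carries of a_l + a_h and b_l + b_h.
 */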
#ifndef __APPLE__
.text
.globl sp_4096_mul_avx2_64
.type sp_4096_mul_avx2_64,@function
.align 16
sp_4096_mul_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_mul_avx2_64
.p2align 4
_sp_4096_mul_avx2_64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x628, %rsp
movq %rdi, 1536(%rsp)
movq %rsi, 1544(%rsp)
movq %rdx, 1552(%rsp)
leaq 1024(%rsp), %r10
leaq 256(%rsi), %r12
# Add: a1 = a_l + a_h (32-word sum written at r10, carry out in r13)
movq (%rsi), %rax
xorq %r13, %r13
addq (%r12), %rax
movq 8(%rsi), %rcx
movq %rax, (%r10)
adcq 8(%r12), %rcx
movq 16(%rsi), %r8
movq %rcx, 8(%r10)
adcq 16(%r12), %r8
movq 24(%rsi), %rax
movq %r8, 16(%r10)
adcq 24(%r12), %rax
movq 32(%rsi), %rcx
movq %rax, 24(%r10)
adcq 32(%r12), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%r10)
adcq 40(%r12), %r8
movq 48(%rsi), %rax
movq %r8, 40(%r10)
adcq 48(%r12), %rax
movq 56(%rsi), %rcx
movq %rax, 48(%r10)
adcq 56(%r12), %rcx
movq 64(%rsi), %r8
movq %rcx, 56(%r10)
adcq 64(%r12), %r8
movq 72(%rsi), %rax
movq %r8, 64(%r10)
adcq 72(%r12), %rax
movq 80(%rsi), %rcx
movq %rax, 72(%r10)
adcq 80(%r12), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%r10)
adcq 88(%r12), %r8
movq 96(%rsi), %rax
movq %r8, 88(%r10)
adcq 96(%r12), %rax
movq 104(%rsi), %rcx
movq %rax, 96(%r10)
adcq 104(%r12), %rcx
movq 112(%rsi), %r8
movq %rcx, 104(%r10)
adcq 112(%r12), %r8
movq 120(%rsi), %rax
movq %r8, 112(%r10)
adcq 120(%r12), %rax
movq 128(%rsi), %rcx
movq %rax, 120(%r10)
adcq 128(%r12), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%r10)
adcq 136(%r12), %r8
movq 144(%rsi), %rax
movq %r8, 136(%r10)
adcq 144(%r12), %rax
movq 152(%rsi), %rcx
movq %rax, 144(%r10)
adcq 152(%r12), %rcx
movq 160(%rsi), %r8
movq %rcx, 152(%r10)
adcq 160(%r12), %r8
movq 168(%rsi), %rax
movq %r8, 160(%r10)
adcq 168(%r12), %rax
movq 176(%rsi), %rcx
movq %rax, 168(%r10)
adcq 176(%r12), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%r10)
adcq 184(%r12), %r8
movq 192(%rsi), %rax
movq %r8, 184(%r10)
adcq 192(%r12), %rax
movq 200(%rsi), %rcx
movq %rax, 192(%r10)
adcq 200(%r12), %rcx
movq 208(%rsi), %r8
movq %rcx, 200(%r10)
adcq 208(%r12), %r8
movq 216(%rsi), %rax
movq %r8, 208(%r10)
adcq 216(%r12), %rax
movq 224(%rsi), %rcx
movq %rax, 216(%r10)
adcq 224(%r12), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%r10)
adcq 232(%r12), %r8
movq 240(%rsi), %rax
movq %r8, 232(%r10)
adcq 240(%r12), %rax
movq 248(%rsi), %rcx
movq %rax, 240(%r10)
adcq 248(%r12), %rcx
movq %rcx, 248(%r10)
adcq $0x00, %r13
movq %r13, 1560(%rsp)
leaq 1280(%rsp), %r11
leaq 256(%rdx), %r12
# Add: b1 = b_l + b_h (32-word sum written at r11, carry out in r14)
movq (%rdx), %rax
xorq %r14, %r14
addq (%r12), %rax
movq 8(%rdx), %rcx
movq %rax, (%r11)
adcq 8(%r12), %rcx
movq 16(%rdx), %r8
movq %rcx, 8(%r11)
adcq 16(%r12), %r8
movq 24(%rdx), %rax
movq %r8, 16(%r11)
adcq 24(%r12), %rax
movq 32(%rdx), %rcx
movq %rax, 24(%r11)
adcq 32(%r12), %rcx
movq 40(%rdx), %r8
movq %rcx, 32(%r11)
adcq 40(%r12), %r8
movq 48(%rdx), %rax
movq %r8, 40(%r11)
adcq 48(%r12), %rax
movq 56(%rdx), %rcx
movq %rax, 48(%r11)
adcq 56(%r12), %rcx
movq 64(%rdx), %r8
movq %rcx, 56(%r11)
adcq 64(%r12), %r8
movq 72(%rdx), %rax
movq %r8, 64(%r11)
adcq 72(%r12), %rax
movq 80(%rdx), %rcx
movq %rax, 72(%r11)
adcq 80(%r12), %rcx
movq 88(%rdx), %r8
movq %rcx, 80(%r11)
adcq 88(%r12), %r8
movq 96(%rdx), %rax
movq %r8, 88(%r11)
adcq 96(%r12), %rax
movq 104(%rdx), %rcx
movq %rax, 96(%r11)
adcq 104(%r12), %rcx
movq 112(%rdx), %r8
movq %rcx, 104(%r11)
adcq 112(%r12), %r8
movq 120(%rdx), %rax
movq %r8, 112(%r11)
adcq 120(%r12), %rax
movq 128(%rdx), %rcx
movq %rax, 120(%r11)
adcq 128(%r12), %rcx
movq 136(%rdx), %r8
movq %rcx, 128(%r11)
adcq 136(%r12), %r8
movq 144(%rdx), %rax
movq %r8, 136(%r11)
adcq 144(%r12), %rax
movq 152(%rdx), %rcx
movq %rax, 144(%r11)
adcq 152(%r12), %rcx
movq 160(%rdx), %r8
movq %rcx, 152(%r11)
adcq 160(%r12), %r8
movq 168(%rdx), %rax
movq %r8, 160(%r11)
adcq 168(%r12), %rax
movq 176(%rdx), %rcx
movq %rax, 168(%r11)
adcq 176(%r12), %rcx
movq 184(%rdx), %r8
movq %rcx, 176(%r11)
adcq 184(%r12), %r8
movq 192(%rdx), %rax
movq %r8, 184(%r11)
adcq 192(%r12), %rax
movq 200(%rdx), %rcx
movq %rax, 192(%r11)
adcq 200(%r12), %rcx
movq 208(%rdx), %r8
movq %rcx, 200(%r11)
adcq 208(%r12), %r8
movq 216(%rdx), %rax
movq %r8, 208(%r11)
adcq 216(%r12), %rax
movq 224(%rdx), %rcx
movq %rax, 216(%r11)
adcq 224(%r12), %rcx
movq 232(%rdx), %r8
movq %rcx, 224(%r11)
adcq 232(%r12), %r8
movq 240(%rdx), %rax
movq %r8, 232(%r11)
adcq 240(%r12), %rax
movq 248(%rdx), %rcx
movq %rax, 240(%r11)
adcq 248(%r12), %rcx
movq %rcx, 248(%r11)
adcq $0x00, %r14
movq %r14, 1568(%rsp)
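# z1 = a1 * b1, result in the 512 bytes at rsp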
movq %r11, %rdx
movq %r10, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_mul_avx2_32@plt
#else
callq _sp_2048_mul_avx2_32
#endif /* __APPLE__ */
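# z2 = a_h * b_h, result at rsp+512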
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
leaq 512(%rsp), %rdi
addq $0x100, %rdx
addq $0x100, %rsi
#ifndef __APPLE__
callq sp_2048_mul_avx2_32@plt
#else
callq _sp_2048_mul_avx2_32
#endif /* __APPLE__ */
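# z0 = a_l * b_l, result in the low half of r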
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
movq 1536(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_mul_avx2_32@plt
#else
callq _sp_2048_mul_avx2_32
#endif /* __APPLE__ */
#ifdef _WIN64
movq 1552(%rsp), %rdx
movq 1544(%rsp), %rsi
movq 1536(%rsp), %rdi
#endif /* _WIN64 */
movq 1560(%rsp), %r13
movq 1568(%rsp), %r14
movq 1536(%rsp), %r15
movq %r13, %r9
leaq 1024(%rsp), %r10
leaq 1280(%rsp), %r11
andq %r14, %r9
negq %r13
negq %r14
addq $0x200, %r15
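# Branch-free carry fix-up: r13/r14 hold 0 or all-ones masks built from the
# carries of a_l + a_h and b_l + b_h; pextq with an all-ones mask copies its
# source and with a zero mask yields zero, so r[64..95] below receives
# (carry_b ? a1 : 0) + (carry_a ? b1 : 0) without branching.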
movq (%r10), %rax
movq (%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
addq %rcx, %rax
movq 8(%r10), %rcx
movq 8(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, (%r15)
adcq %r8, %rcx
movq 16(%r10), %r8
movq 16(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 8(%r15)
adcq %rax, %r8
movq 24(%r10), %rax
movq 24(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 16(%r15)
adcq %rcx, %rax
movq 32(%r10), %rcx
movq 32(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 24(%r15)
adcq %r8, %rcx
movq 40(%r10), %r8
movq 40(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 32(%r15)
adcq %rax, %r8
movq 48(%r10), %rax
movq 48(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 40(%r15)
adcq %rcx, %rax
movq 56(%r10), %rcx
movq 56(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 48(%r15)
adcq %r8, %rcx
movq 64(%r10), %r8
movq 64(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 56(%r15)
adcq %rax, %r8
movq 72(%r10), %rax
movq 72(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 64(%r15)
adcq %rcx, %rax
movq 80(%r10), %rcx
movq 80(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 72(%r15)
adcq %r8, %rcx
movq 88(%r10), %r8
movq 88(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 80(%r15)
adcq %rax, %r8
movq 96(%r10), %rax
movq 96(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 88(%r15)
adcq %rcx, %rax
movq 104(%r10), %rcx
movq 104(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 96(%r15)
adcq %r8, %rcx
movq 112(%r10), %r8
movq 112(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 104(%r15)
adcq %rax, %r8
movq 120(%r10), %rax
movq 120(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 112(%r15)
adcq %rcx, %rax
movq 128(%r10), %rcx
movq 128(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 120(%r15)
adcq %r8, %rcx
movq 136(%r10), %r8
movq 136(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 128(%r15)
adcq %rax, %r8
movq 144(%r10), %rax
movq 144(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 136(%r15)
adcq %rcx, %rax
movq 152(%r10), %rcx
movq 152(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 144(%r15)
adcq %r8, %rcx
movq 160(%r10), %r8
movq 160(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 152(%r15)
adcq %rax, %r8
movq 168(%r10), %rax
movq 168(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 160(%r15)
adcq %rcx, %rax
movq 176(%r10), %rcx
movq 176(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 168(%r15)
adcq %r8, %rcx
movq 184(%r10), %r8
movq 184(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 176(%r15)
adcq %rax, %r8
movq 192(%r10), %rax
movq 192(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 184(%r15)
adcq %rcx, %rax
movq 200(%r10), %rcx
movq 200(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 192(%r15)
adcq %r8, %rcx
movq 208(%r10), %r8
movq 208(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 200(%r15)
adcq %rax, %r8
movq 216(%r10), %rax
movq 216(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 208(%r15)
adcq %rcx, %rax
movq 224(%r10), %rcx
movq 224(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 216(%r15)
adcq %r8, %rcx
movq 232(%r10), %r8
movq 232(%r11), %rax
pextq %r14, %r8, %r8
pextq %r13, %rax, %rax
movq %rcx, 224(%r15)
adcq %rax, %r8
movq 240(%r10), %rax
movq 240(%r11), %rcx
pextq %r14, %rax, %rax
pextq %r13, %rcx, %rcx
movq %r8, 232(%r15)
adcq %rcx, %rax
movq 248(%r10), %rcx
movq 248(%r11), %r8
pextq %r14, %rcx, %rcx
pextq %r13, %r8, %r8
movq %rax, 240(%r15)
adcq %r8, %rcx
movq %rcx, 248(%r15)
adcq $0x00, %r9
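# z1 -= z2 (z1 at rsp via r10, z2 = a_h*b_h at rsp+512 via r11)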
leaq 512(%rsp), %r11
movq %rsp, %r10
movq (%r10), %rax
subq (%r11), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%r11), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%r11), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%r11), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%r11), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%r11), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%r11), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%r11), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%r11), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%r11), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%r11), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%r11), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%r11), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%r11), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%r11), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%r11), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%r11), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%r11), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%r11), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%r11), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%r11), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%r11), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%r11), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%r11), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%r11), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%r11), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%r11), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%r11), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%r11), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%r11), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%r11), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%r11), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%r11), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%r11), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%r11), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%r11), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%r11), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%r11), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%r11), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%r11), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%r11), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%r11), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%r11), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%r11), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%r11), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%r11), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%r11), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%r11), %r8
movq 384(%r10), %rax
movq %r8, 376(%r10)
sbbq 384(%r11), %rax
movq 392(%r10), %rcx
movq %rax, 384(%r10)
sbbq 392(%r11), %rcx
movq 400(%r10), %r8
movq %rcx, 392(%r10)
sbbq 400(%r11), %r8
movq 408(%r10), %rax
movq %r8, 400(%r10)
sbbq 408(%r11), %rax
movq 416(%r10), %rcx
movq %rax, 408(%r10)
sbbq 416(%r11), %rcx
movq 424(%r10), %r8
movq %rcx, 416(%r10)
sbbq 424(%r11), %r8
movq 432(%r10), %rax
movq %r8, 424(%r10)
sbbq 432(%r11), %rax
movq 440(%r10), %rcx
movq %rax, 432(%r10)
sbbq 440(%r11), %rcx
movq 448(%r10), %r8
movq %rcx, 440(%r10)
sbbq 448(%r11), %r8
movq 456(%r10), %rax
movq %r8, 448(%r10)
sbbq 456(%r11), %rax
movq 464(%r10), %rcx
movq %rax, 456(%r10)
sbbq 464(%r11), %rcx
movq 472(%r10), %r8
movq %rcx, 464(%r10)
sbbq 472(%r11), %r8
movq 480(%r10), %rax
movq %r8, 472(%r10)
sbbq 480(%r11), %rax
movq 488(%r10), %rcx
movq %rax, 480(%r10)
sbbq 488(%r11), %rcx
movq 496(%r10), %r8
movq %rcx, 488(%r10)
sbbq 496(%r11), %r8
movq 504(%r10), %rax
movq %r8, 496(%r10)
sbbq 504(%r11), %rax
movq %rax, 504(%r10)
sbbq $0x00, %r9
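# z1 -= z0 (z0 = a_l*b_l, held in the low half of r)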
movq (%r10), %rax
subq (%rdi), %rax
movq 8(%r10), %rcx
movq %rax, (%r10)
sbbq 8(%rdi), %rcx
movq 16(%r10), %r8
movq %rcx, 8(%r10)
sbbq 16(%rdi), %r8
movq 24(%r10), %rax
movq %r8, 16(%r10)
sbbq 24(%rdi), %rax
movq 32(%r10), %rcx
movq %rax, 24(%r10)
sbbq 32(%rdi), %rcx
movq 40(%r10), %r8
movq %rcx, 32(%r10)
sbbq 40(%rdi), %r8
movq 48(%r10), %rax
movq %r8, 40(%r10)
sbbq 48(%rdi), %rax
movq 56(%r10), %rcx
movq %rax, 48(%r10)
sbbq 56(%rdi), %rcx
movq 64(%r10), %r8
movq %rcx, 56(%r10)
sbbq 64(%rdi), %r8
movq 72(%r10), %rax
movq %r8, 64(%r10)
sbbq 72(%rdi), %rax
movq 80(%r10), %rcx
movq %rax, 72(%r10)
sbbq 80(%rdi), %rcx
movq 88(%r10), %r8
movq %rcx, 80(%r10)
sbbq 88(%rdi), %r8
movq 96(%r10), %rax
movq %r8, 88(%r10)
sbbq 96(%rdi), %rax
movq 104(%r10), %rcx
movq %rax, 96(%r10)
sbbq 104(%rdi), %rcx
movq 112(%r10), %r8
movq %rcx, 104(%r10)
sbbq 112(%rdi), %r8
movq 120(%r10), %rax
movq %r8, 112(%r10)
sbbq 120(%rdi), %rax
movq 128(%r10), %rcx
movq %rax, 120(%r10)
sbbq 128(%rdi), %rcx
movq 136(%r10), %r8
movq %rcx, 128(%r10)
sbbq 136(%rdi), %r8
movq 144(%r10), %rax
movq %r8, 136(%r10)
sbbq 144(%rdi), %rax
movq 152(%r10), %rcx
movq %rax, 144(%r10)
sbbq 152(%rdi), %rcx
movq 160(%r10), %r8
movq %rcx, 152(%r10)
sbbq 160(%rdi), %r8
movq 168(%r10), %rax
movq %r8, 160(%r10)
sbbq 168(%rdi), %rax
movq 176(%r10), %rcx
movq %rax, 168(%r10)
sbbq 176(%rdi), %rcx
movq 184(%r10), %r8
movq %rcx, 176(%r10)
sbbq 184(%rdi), %r8
movq 192(%r10), %rax
movq %r8, 184(%r10)
sbbq 192(%rdi), %rax
movq 200(%r10), %rcx
movq %rax, 192(%r10)
sbbq 200(%rdi), %rcx
movq 208(%r10), %r8
movq %rcx, 200(%r10)
sbbq 208(%rdi), %r8
movq 216(%r10), %rax
movq %r8, 208(%r10)
sbbq 216(%rdi), %rax
movq 224(%r10), %rcx
movq %rax, 216(%r10)
sbbq 224(%rdi), %rcx
movq 232(%r10), %r8
movq %rcx, 224(%r10)
sbbq 232(%rdi), %r8
movq 240(%r10), %rax
movq %r8, 232(%r10)
sbbq 240(%rdi), %rax
movq 248(%r10), %rcx
movq %rax, 240(%r10)
sbbq 248(%rdi), %rcx
movq 256(%r10), %r8
movq %rcx, 248(%r10)
sbbq 256(%rdi), %r8
movq 264(%r10), %rax
movq %r8, 256(%r10)
sbbq 264(%rdi), %rax
movq 272(%r10), %rcx
movq %rax, 264(%r10)
sbbq 272(%rdi), %rcx
movq 280(%r10), %r8
movq %rcx, 272(%r10)
sbbq 280(%rdi), %r8
movq 288(%r10), %rax
movq %r8, 280(%r10)
sbbq 288(%rdi), %rax
movq 296(%r10), %rcx
movq %rax, 288(%r10)
sbbq 296(%rdi), %rcx
movq 304(%r10), %r8
movq %rcx, 296(%r10)
sbbq 304(%rdi), %r8
movq 312(%r10), %rax
movq %r8, 304(%r10)
sbbq 312(%rdi), %rax
movq 320(%r10), %rcx
movq %rax, 312(%r10)
sbbq 320(%rdi), %rcx
movq 328(%r10), %r8
movq %rcx, 320(%r10)
sbbq 328(%rdi), %r8
movq 336(%r10), %rax
movq %r8, 328(%r10)
sbbq 336(%rdi), %rax
movq 344(%r10), %rcx
movq %rax, 336(%r10)
sbbq 344(%rdi), %rcx
movq 352(%r10), %r8
movq %rcx, 344(%r10)
sbbq 352(%rdi), %r8
movq 360(%r10), %rax
movq %r8, 352(%r10)
sbbq 360(%rdi), %rax
movq 368(%r10), %rcx
movq %rax, 360(%r10)
sbbq 368(%rdi), %rcx
movq 376(%r10), %r8
movq %rcx, 368(%r10)
sbbq 376(%rdi), %r8
movq 384(%r10), %rax
movq %r8, 376(%r10)
sbbq 384(%rdi), %rax
movq 392(%r10), %rcx
movq %rax, 384(%r10)
sbbq 392(%rdi), %rcx
movq 400(%r10), %r8
movq %rcx, 392(%r10)
sbbq 400(%rdi), %r8
movq 408(%r10), %rax
movq %r8, 400(%r10)
sbbq 408(%rdi), %rax
movq 416(%r10), %rcx
movq %rax, 408(%r10)
sbbq 416(%rdi), %rcx
movq 424(%r10), %r8
movq %rcx, 416(%r10)
sbbq 424(%rdi), %r8
movq 432(%r10), %rax
movq %r8, 424(%r10)
sbbq 432(%rdi), %rax
movq 440(%r10), %rcx
movq %rax, 432(%r10)
sbbq 440(%rdi), %rcx
movq 448(%r10), %r8
movq %rcx, 440(%r10)
sbbq 448(%rdi), %r8
movq 456(%r10), %rax
movq %r8, 448(%r10)
sbbq 456(%rdi), %rax
movq 464(%r10), %rcx
movq %rax, 456(%r10)
sbbq 464(%rdi), %rcx
movq 472(%r10), %r8
movq %rcx, 464(%r10)
sbbq 472(%rdi), %r8
movq 480(%r10), %rax
movq %r8, 472(%r10)
sbbq 480(%rdi), %rax
movq 488(%r10), %rcx
movq %rax, 480(%r10)
sbbq 488(%rdi), %rcx
movq 496(%r10), %r8
movq %rcx, 488(%r10)
sbbq 496(%rdi), %r8
movq 504(%r10), %rax
movq %r8, 496(%r10)
sbbq 504(%rdi), %rax
movq %rax, 504(%r10)
sbbq $0x00, %r9
subq $0x100, %r15
# Add: fold the adjusted middle product z1 into the result, r[32..95] += z1
movq (%r15), %rax
addq (%r10), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r10), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r10), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r10), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r10), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r10), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r10), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r10), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r10), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r10), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r10), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r10), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r10), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r10), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r10), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r10), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r10), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r10), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r10), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r10), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r10), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r10), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r10), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r10), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r10), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r10), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r10), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r10), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r10), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r10), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r10), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r10), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r10), %r8
movq 264(%r15), %rax
movq %r8, 256(%r15)
adcq 264(%r10), %rax
movq 272(%r15), %rcx
movq %rax, 264(%r15)
adcq 272(%r10), %rcx
movq 280(%r15), %r8
movq %rcx, 272(%r15)
adcq 280(%r10), %r8
movq 288(%r15), %rax
movq %r8, 280(%r15)
adcq 288(%r10), %rax
movq 296(%r15), %rcx
movq %rax, 288(%r15)
adcq 296(%r10), %rcx
movq 304(%r15), %r8
movq %rcx, 296(%r15)
adcq 304(%r10), %r8
movq 312(%r15), %rax
movq %r8, 304(%r15)
adcq 312(%r10), %rax
movq 320(%r15), %rcx
movq %rax, 312(%r15)
adcq 320(%r10), %rcx
movq 328(%r15), %r8
movq %rcx, 320(%r15)
adcq 328(%r10), %r8
movq 336(%r15), %rax
movq %r8, 328(%r15)
adcq 336(%r10), %rax
movq 344(%r15), %rcx
movq %rax, 336(%r15)
adcq 344(%r10), %rcx
movq 352(%r15), %r8
movq %rcx, 344(%r15)
adcq 352(%r10), %r8
movq 360(%r15), %rax
movq %r8, 352(%r15)
adcq 360(%r10), %rax
movq 368(%r15), %rcx
movq %rax, 360(%r15)
adcq 368(%r10), %rcx
movq 376(%r15), %r8
movq %rcx, 368(%r15)
adcq 376(%r10), %r8
movq 384(%r15), %rax
movq %r8, 376(%r15)
adcq 384(%r10), %rax
movq 392(%r15), %rcx
movq %rax, 384(%r15)
adcq 392(%r10), %rcx
movq 400(%r15), %r8
movq %rcx, 392(%r15)
adcq 400(%r10), %r8
movq 408(%r15), %rax
movq %r8, 400(%r15)
adcq 408(%r10), %rax
movq 416(%r15), %rcx
movq %rax, 408(%r15)
adcq 416(%r10), %rcx
movq 424(%r15), %r8
movq %rcx, 416(%r15)
adcq 424(%r10), %r8
movq 432(%r15), %rax
movq %r8, 424(%r15)
adcq 432(%r10), %rax
movq 440(%r15), %rcx
movq %rax, 432(%r15)
adcq 440(%r10), %rcx
movq 448(%r15), %r8
movq %rcx, 440(%r15)
adcq 448(%r10), %r8
movq 456(%r15), %rax
movq %r8, 448(%r15)
adcq 456(%r10), %rax
movq 464(%r15), %rcx
movq %rax, 456(%r15)
adcq 464(%r10), %rcx
movq 472(%r15), %r8
movq %rcx, 464(%r15)
adcq 472(%r10), %r8
movq 480(%r15), %rax
movq %r8, 472(%r15)
adcq 480(%r10), %rax
movq 488(%r15), %rcx
movq %rax, 480(%r15)
adcq 488(%r10), %rcx
movq 496(%r15), %r8
movq %rcx, 488(%r15)
adcq 496(%r10), %r8
movq 504(%r15), %rax
movq %r8, 496(%r15)
adcq 504(%r10), %rax
movq %rax, 504(%r15)
adcq $0x00, %r9
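# Store the resulting carry word at r[96]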
movq %r9, 768(%rdi)
addq $0x100, %r15
# Add: fold the low 33 words of z2 into the result, r[64..96] += z2[0..32]
movq (%r15), %rax
addq (%r11), %rax
movq 8(%r15), %rcx
movq %rax, (%r15)
adcq 8(%r11), %rcx
movq 16(%r15), %r8
movq %rcx, 8(%r15)
adcq 16(%r11), %r8
movq 24(%r15), %rax
movq %r8, 16(%r15)
adcq 24(%r11), %rax
movq 32(%r15), %rcx
movq %rax, 24(%r15)
adcq 32(%r11), %rcx
movq 40(%r15), %r8
movq %rcx, 32(%r15)
adcq 40(%r11), %r8
movq 48(%r15), %rax
movq %r8, 40(%r15)
adcq 48(%r11), %rax
movq 56(%r15), %rcx
movq %rax, 48(%r15)
adcq 56(%r11), %rcx
movq 64(%r15), %r8
movq %rcx, 56(%r15)
adcq 64(%r11), %r8
movq 72(%r15), %rax
movq %r8, 64(%r15)
adcq 72(%r11), %rax
movq 80(%r15), %rcx
movq %rax, 72(%r15)
adcq 80(%r11), %rcx
movq 88(%r15), %r8
movq %rcx, 80(%r15)
adcq 88(%r11), %r8
movq 96(%r15), %rax
movq %r8, 88(%r15)
adcq 96(%r11), %rax
movq 104(%r15), %rcx
movq %rax, 96(%r15)
adcq 104(%r11), %rcx
movq 112(%r15), %r8
movq %rcx, 104(%r15)
adcq 112(%r11), %r8
movq 120(%r15), %rax
movq %r8, 112(%r15)
adcq 120(%r11), %rax
movq 128(%r15), %rcx
movq %rax, 120(%r15)
adcq 128(%r11), %rcx
movq 136(%r15), %r8
movq %rcx, 128(%r15)
adcq 136(%r11), %r8
movq 144(%r15), %rax
movq %r8, 136(%r15)
adcq 144(%r11), %rax
movq 152(%r15), %rcx
movq %rax, 144(%r15)
adcq 152(%r11), %rcx
movq 160(%r15), %r8
movq %rcx, 152(%r15)
adcq 160(%r11), %r8
movq 168(%r15), %rax
movq %r8, 160(%r15)
adcq 168(%r11), %rax
movq 176(%r15), %rcx
movq %rax, 168(%r15)
adcq 176(%r11), %rcx
movq 184(%r15), %r8
movq %rcx, 176(%r15)
adcq 184(%r11), %r8
movq 192(%r15), %rax
movq %r8, 184(%r15)
adcq 192(%r11), %rax
movq 200(%r15), %rcx
movq %rax, 192(%r15)
adcq 200(%r11), %rcx
movq 208(%r15), %r8
movq %rcx, 200(%r15)
adcq 208(%r11), %r8
movq 216(%r15), %rax
movq %r8, 208(%r15)
adcq 216(%r11), %rax
movq 224(%r15), %rcx
movq %rax, 216(%r15)
adcq 224(%r11), %rcx
movq 232(%r15), %r8
movq %rcx, 224(%r15)
adcq 232(%r11), %r8
movq 240(%r15), %rax
movq %r8, 232(%r15)
adcq 240(%r11), %rax
movq 248(%r15), %rcx
movq %rax, 240(%r15)
adcq 248(%r11), %rcx
movq 256(%r15), %r8
movq %rcx, 248(%r15)
adcq 256(%r11), %r8
movq %r8, 256(%r15)
# Add to zero: copy z2[33..63] into r[97..127], propagating the carry
movq 264(%r11), %rax
adcq $0x00, %rax
movq 272(%r11), %rcx
movq %rax, 264(%r15)
adcq $0x00, %rcx
movq 280(%r11), %r8
movq %rcx, 272(%r15)
adcq $0x00, %r8
movq 288(%r11), %rax
movq %r8, 280(%r15)
adcq $0x00, %rax
movq 296(%r11), %rcx
movq %rax, 288(%r15)
adcq $0x00, %rcx
movq 304(%r11), %r8
movq %rcx, 296(%r15)
adcq $0x00, %r8
movq 312(%r11), %rax
movq %r8, 304(%r15)
adcq $0x00, %rax
movq 320(%r11), %rcx
movq %rax, 312(%r15)
adcq $0x00, %rcx
movq 328(%r11), %r8
movq %rcx, 320(%r15)
adcq $0x00, %r8
movq 336(%r11), %rax
movq %r8, 328(%r15)
adcq $0x00, %rax
movq 344(%r11), %rcx
movq %rax, 336(%r15)
adcq $0x00, %rcx
movq 352(%r11), %r8
movq %rcx, 344(%r15)
adcq $0x00, %r8
movq 360(%r11), %rax
movq %r8, 352(%r15)
adcq $0x00, %rax
movq 368(%r11), %rcx
movq %rax, 360(%r15)
adcq $0x00, %rcx
movq 376(%r11), %r8
movq %rcx, 368(%r15)
adcq $0x00, %r8
movq 384(%r11), %rax
movq %r8, 376(%r15)
adcq $0x00, %rax
movq 392(%r11), %rcx
movq %rax, 384(%r15)
adcq $0x00, %rcx
movq 400(%r11), %r8
movq %rcx, 392(%r15)
adcq $0x00, %r8
movq 408(%r11), %rax
movq %r8, 400(%r15)
adcq $0x00, %rax
movq 416(%r11), %rcx
movq %rax, 408(%r15)
adcq $0x00, %rcx
movq 424(%r11), %r8
movq %rcx, 416(%r15)
adcq $0x00, %r8
movq 432(%r11), %rax
movq %r8, 424(%r15)
adcq $0x00, %rax
movq 440(%r11), %rcx
movq %rax, 432(%r15)
adcq $0x00, %rcx
movq 448(%r11), %r8
movq %rcx, 440(%r15)
adcq $0x00, %r8
movq 456(%r11), %rax
movq %r8, 448(%r15)
adcq $0x00, %rax
movq 464(%r11), %rcx
movq %rax, 456(%r15)
adcq $0x00, %rcx
movq 472(%r11), %r8
movq %rcx, 464(%r15)
adcq $0x00, %r8
movq 480(%r11), %rax
movq %r8, 472(%r15)
adcq $0x00, %rax
movq 488(%r11), %rcx
movq %rax, 480(%r15)
adcq $0x00, %rcx
movq 496(%r11), %r8
movq %rcx, 488(%r15)
adcq $0x00, %r8
movq 504(%r11), %rax
movq %r8, 496(%r15)
adcq $0x00, %rax
movq %rax, 504(%r15)
addq $0x628, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_4096_mul_avx2_64,.-sp_4096_mul_avx2_64
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* Karatsuba: ah^2, al^2, (al - ah)^2
*
* r A single precision integer.
* a A single precision integer.
*/
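/* The identity used for the cross term (informal sketch): with
 * a = a_h*2^2048 + a_l,
 *
 *   a^2 = a_h^2 * 2^4096 + 2*a_l*a_h * 2^2048 + a_l^2
 *   2*a_l*a_h = a_l^2 + a_h^2 - (a_l - a_h)^2
 *
 * so three 2048-bit squarings suffice. The conditional negate below forms
 * |a_l - a_h| so that a non-negative value is squared.
 */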
#ifndef __APPLE__
.text
.globl sp_4096_sqr_64
.type sp_4096_sqr_64,@function
.align 16
sp_4096_sqr_64:
#else
.section __TEXT,__text
.globl _sp_4096_sqr_64
.p2align 4
_sp_4096_sqr_64:
#endif /* __APPLE__ */
subq $0x210, %rsp
movq %rdi, 512(%rsp)
movq %rsi, 520(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 256(%rsi), %r9
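# t = a_l - a_h (t at rsp via r8, borrow accumulated in rcx)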
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq 128(%rsi), %rdx
movq %rax, 120(%r8)
sbbq 128(%r9), %rdx
movq 136(%rsi), %rax
movq %rdx, 128(%r8)
sbbq 136(%r9), %rax
movq 144(%rsi), %rdx
movq %rax, 136(%r8)
sbbq 144(%r9), %rdx
movq 152(%rsi), %rax
movq %rdx, 144(%r8)
sbbq 152(%r9), %rax
movq 160(%rsi), %rdx
movq %rax, 152(%r8)
sbbq 160(%r9), %rdx
movq 168(%rsi), %rax
movq %rdx, 160(%r8)
sbbq 168(%r9), %rax
movq 176(%rsi), %rdx
movq %rax, 168(%r8)
sbbq 176(%r9), %rdx
movq 184(%rsi), %rax
movq %rdx, 176(%r8)
sbbq 184(%r9), %rax
movq 192(%rsi), %rdx
movq %rax, 184(%r8)
sbbq 192(%r9), %rdx
movq 200(%rsi), %rax
movq %rdx, 192(%r8)
sbbq 200(%r9), %rax
movq 208(%rsi), %rdx
movq %rax, 200(%r8)
sbbq 208(%r9), %rdx
movq 216(%rsi), %rax
movq %rdx, 208(%r8)
sbbq 216(%r9), %rax
movq 224(%rsi), %rdx
movq %rax, 216(%r8)
sbbq 224(%r9), %rdx
movq 232(%rsi), %rax
movq %rdx, 224(%r8)
sbbq 232(%r9), %rax
movq 240(%rsi), %rdx
movq %rax, 232(%r8)
sbbq 240(%r9), %rdx
movq 248(%rsi), %rax
movq %rdx, 240(%r8)
sbbq 248(%r9), %rax
movq %rax, 248(%r8)
sbbq $0x00, %rcx
# Cond Negate: if the subtraction borrowed (rcx is an all-ones mask), negate t so it holds |a_l - a_h|
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 128(%r8), %rdx
setc %r9b
movq %rax, 120(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 136(%r8), %rax
setc %r9b
movq %rdx, 128(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 144(%r8), %rdx
setc %r9b
movq %rax, 136(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 152(%r8), %rax
setc %r9b
movq %rdx, 144(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 160(%r8), %rdx
setc %r9b
movq %rax, 152(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 168(%r8), %rax
setc %r9b
movq %rdx, 160(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 176(%r8), %rdx
setc %r9b
movq %rax, 168(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 184(%r8), %rax
setc %r9b
movq %rdx, 176(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 192(%r8), %rdx
setc %r9b
movq %rax, 184(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 200(%r8), %rax
setc %r9b
movq %rdx, 192(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 208(%r8), %rdx
setc %r9b
movq %rax, 200(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 216(%r8), %rax
setc %r9b
movq %rdx, 208(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 224(%r8), %rdx
setc %r9b
movq %rax, 216(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 232(%r8), %rax
setc %r9b
movq %rdx, 224(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 240(%r8), %rdx
setc %r9b
movq %rax, 232(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 248(%r8), %rax
setc %r9b
movq %rdx, 240(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 248(%r8)
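# t^2 = (a_l - a_h)^2, result in the 512 bytes at rsp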
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_32@plt
#else
callq _sp_2048_sqr_32
#endif /* __APPLE__ */
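# a_h^2, result at r+512 (high half of r)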
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
addq $0x100, %rsi
addq $0x200, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_32@plt
#else
callq _sp_2048_sqr_32
#endif /* __APPLE__ */
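# a_l^2, result in the low half of r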
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_sqr_32@plt
#else
callq _sp_2048_sqr_32
#endif /* __APPLE__ */
#ifdef _WIN64
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
#endif /* _WIN64 */
movq 512(%rsp), %rsi
leaq 256(%rsp), %r8
addq $0x300, %rsi
movq $0x00, %rcx
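# t^2 -= a_h^2 (t^2 at rsp, a_h^2 at r+512; the negative offsets off r8 and
# rsi span the full 512-byte buffers)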
movq -256(%r8), %rax
subq -256(%rsi), %rax
movq -248(%r8), %rdx
movq %rax, -256(%r8)
sbbq -248(%rsi), %rdx
movq -240(%r8), %rax
movq %rdx, -248(%r8)
sbbq -240(%rsi), %rax
movq -232(%r8), %rdx
movq %rax, -240(%r8)
sbbq -232(%rsi), %rdx
movq -224(%r8), %rax
movq %rdx, -232(%r8)
sbbq -224(%rsi), %rax
movq -216(%r8), %rdx
movq %rax, -224(%r8)
sbbq -216(%rsi), %rdx
movq -208(%r8), %rax
movq %rdx, -216(%r8)
sbbq -208(%rsi), %rax
movq -200(%r8), %rdx
movq %rax, -208(%r8)
sbbq -200(%rsi), %rdx
movq -192(%r8), %rax
movq %rdx, -200(%r8)
sbbq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq 192(%r8), %rax
movq %rdx, 184(%r8)
sbbq 192(%rsi), %rax
movq 200(%r8), %rdx
movq %rax, 192(%r8)
sbbq 200(%rsi), %rdx
movq 208(%r8), %rax
movq %rdx, 200(%r8)
sbbq 208(%rsi), %rax
movq 216(%r8), %rdx
movq %rax, 208(%r8)
sbbq 216(%rsi), %rdx
movq 224(%r8), %rax
movq %rdx, 216(%r8)
sbbq 224(%rsi), %rax
movq 232(%r8), %rdx
movq %rax, 224(%r8)
sbbq 232(%rsi), %rdx
movq 240(%r8), %rax
movq %rdx, 232(%r8)
sbbq 240(%rsi), %rax
movq 248(%r8), %rdx
movq %rax, 240(%r8)
sbbq 248(%rsi), %rdx
movq %rdx, 248(%r8)
sbbq $0x00, %rcx
subq $0x200, %rsi
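# t^2 -= a_l^2 as well; rsp now holds (a_l - a_h)^2 - a_h^2 - a_l^2, i.e.
# -(2*a_l*a_h), with the borrow tracked in rcx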
movq -256(%r8), %rax
subq -256(%rsi), %rax
movq -248(%r8), %rdx
movq %rax, -256(%r8)
sbbq -248(%rsi), %rdx
movq -240(%r8), %rax
movq %rdx, -248(%r8)
sbbq -240(%rsi), %rax
movq -232(%r8), %rdx
movq %rax, -240(%r8)
sbbq -232(%rsi), %rdx
movq -224(%r8), %rax
movq %rdx, -232(%r8)
sbbq -224(%rsi), %rax
movq -216(%r8), %rdx
movq %rax, -224(%r8)
sbbq -216(%rsi), %rdx
movq -208(%r8), %rax
movq %rdx, -216(%r8)
sbbq -208(%rsi), %rax
movq -200(%r8), %rdx
movq %rax, -208(%r8)
sbbq -200(%rsi), %rdx
movq -192(%r8), %rax
movq %rdx, -200(%r8)
sbbq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq 192(%r8), %rax
movq %rdx, 184(%r8)
sbbq 192(%rsi), %rax
movq 200(%r8), %rdx
movq %rax, 192(%r8)
sbbq 200(%rsi), %rdx
movq 208(%r8), %rax
movq %rdx, 200(%r8)
sbbq 208(%rsi), %rax
movq 216(%r8), %rdx
movq %rax, 208(%r8)
sbbq 216(%rsi), %rdx
movq 224(%r8), %rax
movq %rdx, 216(%r8)
sbbq 224(%rsi), %rax
movq 232(%r8), %rdx
movq %rax, 224(%r8)
sbbq 232(%rsi), %rdx
movq 240(%r8), %rax
movq %rdx, 232(%r8)
sbbq 240(%rsi), %rax
movq 248(%r8), %rdx
movq %rax, 240(%r8)
sbbq 248(%rsi), %rdx
movq %rdx, 248(%r8)
sbbq $0x00, %rcx
movq 512(%rsp), %rdi
negq %rcx
addq $0x200, %rdi
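# r[32..95] -= t^2; since the buffer holds -(2*a_l*a_h), this effectively
# adds the 2*a_l*a_h middle term, with sign handling via rcx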
movq -256(%rdi), %rax
subq -256(%r8), %rax
movq -248(%rdi), %rdx
movq %rax, -256(%rdi)
sbbq -248(%r8), %rdx
movq -240(%rdi), %rax
movq %rdx, -248(%rdi)
sbbq -240(%r8), %rax
movq -232(%rdi), %rdx
movq %rax, -240(%rdi)
sbbq -232(%r8), %rdx
movq -224(%rdi), %rax
movq %rdx, -232(%rdi)
sbbq -224(%r8), %rax
movq -216(%rdi), %rdx
movq %rax, -224(%rdi)
sbbq -216(%r8), %rdx
movq -208(%rdi), %rax
movq %rdx, -216(%rdi)
sbbq -208(%r8), %rax
movq -200(%rdi), %rdx
movq %rax, -208(%rdi)
sbbq -200(%r8), %rdx
movq -192(%rdi), %rax
movq %rdx, -200(%rdi)
sbbq -192(%r8), %rax
movq -184(%rdi), %rdx
movq %rax, -192(%rdi)
sbbq -184(%r8), %rdx
movq -176(%rdi), %rax
movq %rdx, -184(%rdi)
sbbq -176(%r8), %rax
movq -168(%rdi), %rdx
movq %rax, -176(%rdi)
sbbq -168(%r8), %rdx
movq -160(%rdi), %rax
movq %rdx, -168(%rdi)
sbbq -160(%r8), %rax
movq -152(%rdi), %rdx
movq %rax, -160(%rdi)
sbbq -152(%r8), %rdx
movq -144(%rdi), %rax
movq %rdx, -152(%rdi)
sbbq -144(%r8), %rax
movq -136(%rdi), %rdx
movq %rax, -144(%rdi)
sbbq -136(%r8), %rdx
movq -128(%rdi), %rax
movq %rdx, -136(%rdi)
sbbq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
sbbq 128(%r8), %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
sbbq 136(%r8), %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
sbbq 144(%r8), %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
sbbq 152(%r8), %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
sbbq 160(%r8), %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
sbbq 168(%r8), %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
sbbq 176(%r8), %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
sbbq 184(%r8), %rdx
movq 192(%rdi), %rax
movq %rdx, 184(%rdi)
sbbq 192(%r8), %rax
movq 200(%rdi), %rdx
movq %rax, 192(%rdi)
sbbq 200(%r8), %rdx
movq 208(%rdi), %rax
movq %rdx, 200(%rdi)
sbbq 208(%r8), %rax
movq 216(%rdi), %rdx
movq %rax, 208(%rdi)
sbbq 216(%r8), %rdx
movq 224(%rdi), %rax
movq %rdx, 216(%rdi)
sbbq 224(%r8), %rax
movq 232(%rdi), %rdx
movq %rax, 224(%rdi)
sbbq 232(%r8), %rdx
movq 240(%rdi), %rax
movq %rdx, 232(%rdi)
sbbq 240(%r8), %rax
movq 248(%rdi), %rdx
movq %rax, 240(%rdi)
sbbq 248(%r8), %rdx
movq %rdx, 248(%rdi)
sbbq $0x00, %rcx
movq 512(%rsp), %rdi
addq $0x300, %rdi
# Add in word: add the final carry adjustment in rcx into r[96] and ripple it through r[127]
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
adcq $0x00, %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
adcq $0x00, %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
adcq $0x00, %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
adcq $0x00, %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
adcq $0x00, %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
adcq $0x00, %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
adcq $0x00, %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
adcq $0x00, %rdx
movq 192(%rdi), %rax
movq %rdx, 184(%rdi)
adcq $0x00, %rax
movq 200(%rdi), %rdx
movq %rax, 192(%rdi)
adcq $0x00, %rdx
movq 208(%rdi), %rax
movq %rdx, 200(%rdi)
adcq $0x00, %rax
movq 216(%rdi), %rdx
movq %rax, 208(%rdi)
adcq $0x00, %rdx
movq 224(%rdi), %rax
movq %rdx, 216(%rdi)
adcq $0x00, %rax
movq 232(%rdi), %rdx
movq %rax, 224(%rdi)
adcq $0x00, %rdx
movq 240(%rdi), %rax
movq %rdx, 232(%rdi)
adcq $0x00, %rax
movq 248(%rdi), %rdx
movq %rax, 240(%rdi)
adcq $0x00, %rdx
movq %rdx, 248(%rdi)
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
addq $0x210, %rsp
repz retq
#ifndef __APPLE__
.size sp_4096_sqr_64,.-sp_4096_sqr_64
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* Karatsuba: ah^2, al^2, (al - ah)^2
*
* r A single precision integer.
* a A single precision integer.
*/
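/* Same split and recombination as sp_4096_sqr_64 above; the half-size
 * squarings are done by sp_2048_sqr_avx2_32 instead.
 */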
#ifndef __APPLE__
.text
.globl sp_4096_sqr_avx2_64
.type sp_4096_sqr_avx2_64,@function
.align 16
sp_4096_sqr_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_sqr_avx2_64
.p2align 4
_sp_4096_sqr_avx2_64:
#endif /* __APPLE__ */
subq $0x210, %rsp
movq %rdi, 512(%rsp)
movq %rsi, 520(%rsp)
movq $0x00, %rcx
movq %rsp, %r8
leaq 256(%rsi), %r9
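# t = a_l - a_h (t at rsp via r8, borrow accumulated in rcx)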
movq (%rsi), %rdx
subq (%r9), %rdx
movq 8(%rsi), %rax
movq %rdx, (%r8)
sbbq 8(%r9), %rax
movq 16(%rsi), %rdx
movq %rax, 8(%r8)
sbbq 16(%r9), %rdx
movq 24(%rsi), %rax
movq %rdx, 16(%r8)
sbbq 24(%r9), %rax
movq 32(%rsi), %rdx
movq %rax, 24(%r8)
sbbq 32(%r9), %rdx
movq 40(%rsi), %rax
movq %rdx, 32(%r8)
sbbq 40(%r9), %rax
movq 48(%rsi), %rdx
movq %rax, 40(%r8)
sbbq 48(%r9), %rdx
movq 56(%rsi), %rax
movq %rdx, 48(%r8)
sbbq 56(%r9), %rax
movq 64(%rsi), %rdx
movq %rax, 56(%r8)
sbbq 64(%r9), %rdx
movq 72(%rsi), %rax
movq %rdx, 64(%r8)
sbbq 72(%r9), %rax
movq 80(%rsi), %rdx
movq %rax, 72(%r8)
sbbq 80(%r9), %rdx
movq 88(%rsi), %rax
movq %rdx, 80(%r8)
sbbq 88(%r9), %rax
movq 96(%rsi), %rdx
movq %rax, 88(%r8)
sbbq 96(%r9), %rdx
movq 104(%rsi), %rax
movq %rdx, 96(%r8)
sbbq 104(%r9), %rax
movq 112(%rsi), %rdx
movq %rax, 104(%r8)
sbbq 112(%r9), %rdx
movq 120(%rsi), %rax
movq %rdx, 112(%r8)
sbbq 120(%r9), %rax
movq 128(%rsi), %rdx
movq %rax, 120(%r8)
sbbq 128(%r9), %rdx
movq 136(%rsi), %rax
movq %rdx, 128(%r8)
sbbq 136(%r9), %rax
movq 144(%rsi), %rdx
movq %rax, 136(%r8)
sbbq 144(%r9), %rdx
movq 152(%rsi), %rax
movq %rdx, 144(%r8)
sbbq 152(%r9), %rax
movq 160(%rsi), %rdx
movq %rax, 152(%r8)
sbbq 160(%r9), %rdx
movq 168(%rsi), %rax
movq %rdx, 160(%r8)
sbbq 168(%r9), %rax
movq 176(%rsi), %rdx
movq %rax, 168(%r8)
sbbq 176(%r9), %rdx
movq 184(%rsi), %rax
movq %rdx, 176(%r8)
sbbq 184(%r9), %rax
movq 192(%rsi), %rdx
movq %rax, 184(%r8)
sbbq 192(%r9), %rdx
movq 200(%rsi), %rax
movq %rdx, 192(%r8)
sbbq 200(%r9), %rax
movq 208(%rsi), %rdx
movq %rax, 200(%r8)
sbbq 208(%r9), %rdx
movq 216(%rsi), %rax
movq %rdx, 208(%r8)
sbbq 216(%r9), %rax
movq 224(%rsi), %rdx
movq %rax, 216(%r8)
sbbq 224(%r9), %rdx
movq 232(%rsi), %rax
movq %rdx, 224(%r8)
sbbq 232(%r9), %rax
movq 240(%rsi), %rdx
movq %rax, 232(%r8)
sbbq 240(%r9), %rdx
movq 248(%rsi), %rax
movq %rdx, 240(%r8)
sbbq 248(%r9), %rax
movq %rax, 248(%r8)
sbbq $0x00, %rcx
# Cond Negate: if the subtraction borrowed (rcx is an all-ones mask), negate t so it holds |a_l - a_h|
movq (%r8), %rdx
movq %rcx, %r9
xorq %rcx, %rdx
negq %r9
subq %rcx, %rdx
movq 8(%r8), %rax
sbbq $0x00, %r9
movq %rdx, (%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 16(%r8), %rdx
setc %r9b
movq %rax, 8(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 24(%r8), %rax
setc %r9b
movq %rdx, 16(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 32(%r8), %rdx
setc %r9b
movq %rax, 24(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 40(%r8), %rax
setc %r9b
movq %rdx, 32(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 48(%r8), %rdx
setc %r9b
movq %rax, 40(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 56(%r8), %rax
setc %r9b
movq %rdx, 48(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 64(%r8), %rdx
setc %r9b
movq %rax, 56(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 72(%r8), %rax
setc %r9b
movq %rdx, 64(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 80(%r8), %rdx
setc %r9b
movq %rax, 72(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 88(%r8), %rax
setc %r9b
movq %rdx, 80(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 96(%r8), %rdx
setc %r9b
movq %rax, 88(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 104(%r8), %rax
setc %r9b
movq %rdx, 96(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 112(%r8), %rdx
setc %r9b
movq %rax, 104(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 120(%r8), %rax
setc %r9b
movq %rdx, 112(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 128(%r8), %rdx
setc %r9b
movq %rax, 120(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 136(%r8), %rax
setc %r9b
movq %rdx, 128(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 144(%r8), %rdx
setc %r9b
movq %rax, 136(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 152(%r8), %rax
setc %r9b
movq %rdx, 144(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 160(%r8), %rdx
setc %r9b
movq %rax, 152(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 168(%r8), %rax
setc %r9b
movq %rdx, 160(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 176(%r8), %rdx
setc %r9b
movq %rax, 168(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 184(%r8), %rax
setc %r9b
movq %rdx, 176(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 192(%r8), %rdx
setc %r9b
movq %rax, 184(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 200(%r8), %rax
setc %r9b
movq %rdx, 192(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 208(%r8), %rdx
setc %r9b
movq %rax, 200(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 216(%r8), %rax
setc %r9b
movq %rdx, 208(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 224(%r8), %rdx
setc %r9b
movq %rax, 216(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 232(%r8), %rax
setc %r9b
movq %rdx, 224(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq 240(%r8), %rdx
setc %r9b
movq %rax, 232(%r8)
xorq %rcx, %rdx
addq %r9, %rdx
movq 248(%r8), %rax
setc %r9b
movq %rdx, 240(%r8)
xorq %rcx, %rax
addq %r9, %rax
movq %rax, 248(%r8)
movq %r8, %rsi
movq %rsp, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_32@plt
#else
callq _sp_2048_sqr_avx2_32
#endif /* __APPLE__ */
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
addq $0x100, %rsi
addq $0x200, %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_32@plt
#else
callq _sp_2048_sqr_avx2_32
#endif /* __APPLE__ */
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
#ifndef __APPLE__
callq sp_2048_sqr_avx2_32@plt
#else
callq _sp_2048_sqr_avx2_32
#endif /* __APPLE__ */
#ifdef _WIN64
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
#endif /* _WIN64 */
movq 512(%rsp), %rsi
leaq 256(%rsp), %r8
addq $0x300, %rsi
movq $0x00, %rcx
movq -256(%r8), %rax
subq -256(%rsi), %rax
movq -248(%r8), %rdx
movq %rax, -256(%r8)
sbbq -248(%rsi), %rdx
movq -240(%r8), %rax
movq %rdx, -248(%r8)
sbbq -240(%rsi), %rax
movq -232(%r8), %rdx
movq %rax, -240(%r8)
sbbq -232(%rsi), %rdx
movq -224(%r8), %rax
movq %rdx, -232(%r8)
sbbq -224(%rsi), %rax
movq -216(%r8), %rdx
movq %rax, -224(%r8)
sbbq -216(%rsi), %rdx
movq -208(%r8), %rax
movq %rdx, -216(%r8)
sbbq -208(%rsi), %rax
movq -200(%r8), %rdx
movq %rax, -208(%r8)
sbbq -200(%rsi), %rdx
movq -192(%r8), %rax
movq %rdx, -200(%r8)
sbbq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq 192(%r8), %rax
movq %rdx, 184(%r8)
sbbq 192(%rsi), %rax
movq 200(%r8), %rdx
movq %rax, 192(%r8)
sbbq 200(%rsi), %rdx
movq 208(%r8), %rax
movq %rdx, 200(%r8)
sbbq 208(%rsi), %rax
movq 216(%r8), %rdx
movq %rax, 208(%r8)
sbbq 216(%rsi), %rdx
movq 224(%r8), %rax
movq %rdx, 216(%r8)
sbbq 224(%rsi), %rax
movq 232(%r8), %rdx
movq %rax, 224(%r8)
sbbq 232(%rsi), %rdx
movq 240(%r8), %rax
movq %rdx, 232(%r8)
sbbq 240(%rsi), %rax
movq 248(%r8), %rdx
movq %rax, 240(%r8)
sbbq 248(%rsi), %rdx
movq %rdx, 248(%r8)
sbbq $0x00, %rcx
subq $0x200, %rsi
movq -256(%r8), %rax
subq -256(%rsi), %rax
movq -248(%r8), %rdx
movq %rax, -256(%r8)
sbbq -248(%rsi), %rdx
movq -240(%r8), %rax
movq %rdx, -248(%r8)
sbbq -240(%rsi), %rax
movq -232(%r8), %rdx
movq %rax, -240(%r8)
sbbq -232(%rsi), %rdx
movq -224(%r8), %rax
movq %rdx, -232(%r8)
sbbq -224(%rsi), %rax
movq -216(%r8), %rdx
movq %rax, -224(%r8)
sbbq -216(%rsi), %rdx
movq -208(%r8), %rax
movq %rdx, -216(%r8)
sbbq -208(%rsi), %rax
movq -200(%r8), %rdx
movq %rax, -208(%r8)
sbbq -200(%rsi), %rdx
movq -192(%r8), %rax
movq %rdx, -200(%r8)
sbbq -192(%rsi), %rax
movq -184(%r8), %rdx
movq %rax, -192(%r8)
sbbq -184(%rsi), %rdx
movq -176(%r8), %rax
movq %rdx, -184(%r8)
sbbq -176(%rsi), %rax
movq -168(%r8), %rdx
movq %rax, -176(%r8)
sbbq -168(%rsi), %rdx
movq -160(%r8), %rax
movq %rdx, -168(%r8)
sbbq -160(%rsi), %rax
movq -152(%r8), %rdx
movq %rax, -160(%r8)
sbbq -152(%rsi), %rdx
movq -144(%r8), %rax
movq %rdx, -152(%r8)
sbbq -144(%rsi), %rax
movq -136(%r8), %rdx
movq %rax, -144(%r8)
sbbq -136(%rsi), %rdx
movq -128(%r8), %rax
movq %rdx, -136(%r8)
sbbq -128(%rsi), %rax
movq -120(%r8), %rdx
movq %rax, -128(%r8)
sbbq -120(%rsi), %rdx
movq -112(%r8), %rax
movq %rdx, -120(%r8)
sbbq -112(%rsi), %rax
movq -104(%r8), %rdx
movq %rax, -112(%r8)
sbbq -104(%rsi), %rdx
movq -96(%r8), %rax
movq %rdx, -104(%r8)
sbbq -96(%rsi), %rax
movq -88(%r8), %rdx
movq %rax, -96(%r8)
sbbq -88(%rsi), %rdx
movq -80(%r8), %rax
movq %rdx, -88(%r8)
sbbq -80(%rsi), %rax
movq -72(%r8), %rdx
movq %rax, -80(%r8)
sbbq -72(%rsi), %rdx
movq -64(%r8), %rax
movq %rdx, -72(%r8)
sbbq -64(%rsi), %rax
movq -56(%r8), %rdx
movq %rax, -64(%r8)
sbbq -56(%rsi), %rdx
movq -48(%r8), %rax
movq %rdx, -56(%r8)
sbbq -48(%rsi), %rax
movq -40(%r8), %rdx
movq %rax, -48(%r8)
sbbq -40(%rsi), %rdx
movq -32(%r8), %rax
movq %rdx, -40(%r8)
sbbq -32(%rsi), %rax
movq -24(%r8), %rdx
movq %rax, -32(%r8)
sbbq -24(%rsi), %rdx
movq -16(%r8), %rax
movq %rdx, -24(%r8)
sbbq -16(%rsi), %rax
movq -8(%r8), %rdx
movq %rax, -16(%r8)
sbbq -8(%rsi), %rdx
movq (%r8), %rax
movq %rdx, -8(%r8)
sbbq (%rsi), %rax
movq 8(%r8), %rdx
movq %rax, (%r8)
sbbq 8(%rsi), %rdx
movq 16(%r8), %rax
movq %rdx, 8(%r8)
sbbq 16(%rsi), %rax
movq 24(%r8), %rdx
movq %rax, 16(%r8)
sbbq 24(%rsi), %rdx
movq 32(%r8), %rax
movq %rdx, 24(%r8)
sbbq 32(%rsi), %rax
movq 40(%r8), %rdx
movq %rax, 32(%r8)
sbbq 40(%rsi), %rdx
movq 48(%r8), %rax
movq %rdx, 40(%r8)
sbbq 48(%rsi), %rax
movq 56(%r8), %rdx
movq %rax, 48(%r8)
sbbq 56(%rsi), %rdx
movq 64(%r8), %rax
movq %rdx, 56(%r8)
sbbq 64(%rsi), %rax
movq 72(%r8), %rdx
movq %rax, 64(%r8)
sbbq 72(%rsi), %rdx
movq 80(%r8), %rax
movq %rdx, 72(%r8)
sbbq 80(%rsi), %rax
movq 88(%r8), %rdx
movq %rax, 80(%r8)
sbbq 88(%rsi), %rdx
movq 96(%r8), %rax
movq %rdx, 88(%r8)
sbbq 96(%rsi), %rax
movq 104(%r8), %rdx
movq %rax, 96(%r8)
sbbq 104(%rsi), %rdx
movq 112(%r8), %rax
movq %rdx, 104(%r8)
sbbq 112(%rsi), %rax
movq 120(%r8), %rdx
movq %rax, 112(%r8)
sbbq 120(%rsi), %rdx
movq 128(%r8), %rax
movq %rdx, 120(%r8)
sbbq 128(%rsi), %rax
movq 136(%r8), %rdx
movq %rax, 128(%r8)
sbbq 136(%rsi), %rdx
movq 144(%r8), %rax
movq %rdx, 136(%r8)
sbbq 144(%rsi), %rax
movq 152(%r8), %rdx
movq %rax, 144(%r8)
sbbq 152(%rsi), %rdx
movq 160(%r8), %rax
movq %rdx, 152(%r8)
sbbq 160(%rsi), %rax
movq 168(%r8), %rdx
movq %rax, 160(%r8)
sbbq 168(%rsi), %rdx
movq 176(%r8), %rax
movq %rdx, 168(%r8)
sbbq 176(%rsi), %rax
movq 184(%r8), %rdx
movq %rax, 176(%r8)
sbbq 184(%rsi), %rdx
movq 192(%r8), %rax
movq %rdx, 184(%r8)
sbbq 192(%rsi), %rax
movq 200(%r8), %rdx
movq %rax, 192(%r8)
sbbq 200(%rsi), %rdx
movq 208(%r8), %rax
movq %rdx, 200(%r8)
sbbq 208(%rsi), %rax
movq 216(%r8), %rdx
movq %rax, 208(%r8)
sbbq 216(%rsi), %rdx
movq 224(%r8), %rax
movq %rdx, 216(%r8)
sbbq 224(%rsi), %rax
movq 232(%r8), %rdx
movq %rax, 224(%r8)
sbbq 232(%rsi), %rdx
movq 240(%r8), %rax
movq %rdx, 232(%r8)
sbbq 240(%rsi), %rax
movq 248(%r8), %rdx
movq %rax, 240(%r8)
sbbq 248(%rsi), %rdx
movq %rdx, 248(%r8)
sbbq $0x00, %rcx
movq 512(%rsp), %rdi
negq %rcx
addq $0x200, %rdi
movq -256(%rdi), %rax
subq -256(%r8), %rax
movq -248(%rdi), %rdx
movq %rax, -256(%rdi)
sbbq -248(%r8), %rdx
movq -240(%rdi), %rax
movq %rdx, -248(%rdi)
sbbq -240(%r8), %rax
movq -232(%rdi), %rdx
movq %rax, -240(%rdi)
sbbq -232(%r8), %rdx
movq -224(%rdi), %rax
movq %rdx, -232(%rdi)
sbbq -224(%r8), %rax
movq -216(%rdi), %rdx
movq %rax, -224(%rdi)
sbbq -216(%r8), %rdx
movq -208(%rdi), %rax
movq %rdx, -216(%rdi)
sbbq -208(%r8), %rax
movq -200(%rdi), %rdx
movq %rax, -208(%rdi)
sbbq -200(%r8), %rdx
movq -192(%rdi), %rax
movq %rdx, -200(%rdi)
sbbq -192(%r8), %rax
movq -184(%rdi), %rdx
movq %rax, -192(%rdi)
sbbq -184(%r8), %rdx
movq -176(%rdi), %rax
movq %rdx, -184(%rdi)
sbbq -176(%r8), %rax
movq -168(%rdi), %rdx
movq %rax, -176(%rdi)
sbbq -168(%r8), %rdx
movq -160(%rdi), %rax
movq %rdx, -168(%rdi)
sbbq -160(%r8), %rax
movq -152(%rdi), %rdx
movq %rax, -160(%rdi)
sbbq -152(%r8), %rdx
movq -144(%rdi), %rax
movq %rdx, -152(%rdi)
sbbq -144(%r8), %rax
movq -136(%rdi), %rdx
movq %rax, -144(%rdi)
sbbq -136(%r8), %rdx
movq -128(%rdi), %rax
movq %rdx, -136(%rdi)
sbbq -128(%r8), %rax
movq -120(%rdi), %rdx
movq %rax, -128(%rdi)
sbbq -120(%r8), %rdx
movq -112(%rdi), %rax
movq %rdx, -120(%rdi)
sbbq -112(%r8), %rax
movq -104(%rdi), %rdx
movq %rax, -112(%rdi)
sbbq -104(%r8), %rdx
movq -96(%rdi), %rax
movq %rdx, -104(%rdi)
sbbq -96(%r8), %rax
movq -88(%rdi), %rdx
movq %rax, -96(%rdi)
sbbq -88(%r8), %rdx
movq -80(%rdi), %rax
movq %rdx, -88(%rdi)
sbbq -80(%r8), %rax
movq -72(%rdi), %rdx
movq %rax, -80(%rdi)
sbbq -72(%r8), %rdx
movq -64(%rdi), %rax
movq %rdx, -72(%rdi)
sbbq -64(%r8), %rax
movq -56(%rdi), %rdx
movq %rax, -64(%rdi)
sbbq -56(%r8), %rdx
movq -48(%rdi), %rax
movq %rdx, -56(%rdi)
sbbq -48(%r8), %rax
movq -40(%rdi), %rdx
movq %rax, -48(%rdi)
sbbq -40(%r8), %rdx
movq -32(%rdi), %rax
movq %rdx, -40(%rdi)
sbbq -32(%r8), %rax
movq -24(%rdi), %rdx
movq %rax, -32(%rdi)
sbbq -24(%r8), %rdx
movq -16(%rdi), %rax
movq %rdx, -24(%rdi)
sbbq -16(%r8), %rax
movq -8(%rdi), %rdx
movq %rax, -16(%rdi)
sbbq -8(%r8), %rdx
movq (%rdi), %rax
movq %rdx, -8(%rdi)
sbbq (%r8), %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
sbbq 8(%r8), %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
sbbq 16(%r8), %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
sbbq 24(%r8), %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
sbbq 32(%r8), %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
sbbq 40(%r8), %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
sbbq 48(%r8), %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
sbbq 56(%r8), %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
sbbq 64(%r8), %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
sbbq 72(%r8), %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
sbbq 80(%r8), %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
sbbq 88(%r8), %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
sbbq 96(%r8), %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
sbbq 104(%r8), %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
sbbq 112(%r8), %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
sbbq 120(%r8), %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
sbbq 128(%r8), %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
sbbq 136(%r8), %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
sbbq 144(%r8), %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
sbbq 152(%r8), %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
sbbq 160(%r8), %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
sbbq 168(%r8), %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
sbbq 176(%r8), %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
sbbq 184(%r8), %rdx
movq 192(%rdi), %rax
movq %rdx, 184(%rdi)
sbbq 192(%r8), %rax
movq 200(%rdi), %rdx
movq %rax, 192(%rdi)
sbbq 200(%r8), %rdx
movq 208(%rdi), %rax
movq %rdx, 200(%rdi)
sbbq 208(%r8), %rax
movq 216(%rdi), %rdx
movq %rax, 208(%rdi)
sbbq 216(%r8), %rdx
movq 224(%rdi), %rax
movq %rdx, 216(%rdi)
sbbq 224(%r8), %rax
movq 232(%rdi), %rdx
movq %rax, 224(%rdi)
sbbq 232(%r8), %rdx
movq 240(%rdi), %rax
movq %rdx, 232(%rdi)
sbbq 240(%r8), %rax
movq 248(%rdi), %rdx
movq %rax, 240(%rdi)
sbbq 248(%r8), %rdx
movq %rdx, 248(%rdi)
sbbq $0x00, %rcx
movq 512(%rsp), %rdi
addq $0x300, %rdi
# Add in word
movq (%rdi), %rax
addq %rcx, %rax
movq 8(%rdi), %rdx
movq %rax, (%rdi)
adcq $0x00, %rdx
movq 16(%rdi), %rax
movq %rdx, 8(%rdi)
adcq $0x00, %rax
movq 24(%rdi), %rdx
movq %rax, 16(%rdi)
adcq $0x00, %rdx
movq 32(%rdi), %rax
movq %rdx, 24(%rdi)
adcq $0x00, %rax
movq 40(%rdi), %rdx
movq %rax, 32(%rdi)
adcq $0x00, %rdx
movq 48(%rdi), %rax
movq %rdx, 40(%rdi)
adcq $0x00, %rax
movq 56(%rdi), %rdx
movq %rax, 48(%rdi)
adcq $0x00, %rdx
movq 64(%rdi), %rax
movq %rdx, 56(%rdi)
adcq $0x00, %rax
movq 72(%rdi), %rdx
movq %rax, 64(%rdi)
adcq $0x00, %rdx
movq 80(%rdi), %rax
movq %rdx, 72(%rdi)
adcq $0x00, %rax
movq 88(%rdi), %rdx
movq %rax, 80(%rdi)
adcq $0x00, %rdx
movq 96(%rdi), %rax
movq %rdx, 88(%rdi)
adcq $0x00, %rax
movq 104(%rdi), %rdx
movq %rax, 96(%rdi)
adcq $0x00, %rdx
movq 112(%rdi), %rax
movq %rdx, 104(%rdi)
adcq $0x00, %rax
movq 120(%rdi), %rdx
movq %rax, 112(%rdi)
adcq $0x00, %rdx
movq 128(%rdi), %rax
movq %rdx, 120(%rdi)
adcq $0x00, %rax
movq 136(%rdi), %rdx
movq %rax, 128(%rdi)
adcq $0x00, %rdx
movq 144(%rdi), %rax
movq %rdx, 136(%rdi)
adcq $0x00, %rax
movq 152(%rdi), %rdx
movq %rax, 144(%rdi)
adcq $0x00, %rdx
movq 160(%rdi), %rax
movq %rdx, 152(%rdi)
adcq $0x00, %rax
movq 168(%rdi), %rdx
movq %rax, 160(%rdi)
adcq $0x00, %rdx
movq 176(%rdi), %rax
movq %rdx, 168(%rdi)
adcq $0x00, %rax
movq 184(%rdi), %rdx
movq %rax, 176(%rdi)
adcq $0x00, %rdx
movq 192(%rdi), %rax
movq %rdx, 184(%rdi)
adcq $0x00, %rax
movq 200(%rdi), %rdx
movq %rax, 192(%rdi)
adcq $0x00, %rdx
movq 208(%rdi), %rax
movq %rdx, 200(%rdi)
adcq $0x00, %rax
movq 216(%rdi), %rdx
movq %rax, 208(%rdi)
adcq $0x00, %rdx
movq 224(%rdi), %rax
movq %rdx, 216(%rdi)
adcq $0x00, %rax
movq 232(%rdi), %rdx
movq %rax, 224(%rdi)
adcq $0x00, %rdx
movq 240(%rdi), %rax
movq %rdx, 232(%rdi)
adcq $0x00, %rax
movq 248(%rdi), %rdx
movq %rax, 240(%rdi)
adcq $0x00, %rdx
movq %rdx, 248(%rdi)
movq 520(%rsp), %rsi
movq 512(%rsp), %rdi
addq $0x210, %rsp
repz retq
#ifndef __APPLE__
.size sp_4096_sqr_avx2_64,.-sp_4096_sqr_avx2_64
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
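/* Editor's sketch (not generated code): a C model of the mul-by-digit loop
 * below; the _ref name is illustrative and unsigned __int128 (GCC/Clang) is
 * assumed. Digits are little-endian 64-bit words, and r needs 65 words.
 *
 *   #include <stdint.h>
 *
 *   static void sp_4096_mul_d_64_ref(uint64_t* r, const uint64_t* a,
 *                                    uint64_t b)
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 64; i++) {
 *           t += (unsigned __int128)a[i] * b; // digit product plus carry in
 *           r[i] = (uint64_t)t;               // emit low 64 bits
 *           t >>= 64;                         // carry to the next digit
 *       }
 *       r[64] = (uint64_t)t;                  // top word, as stored at A[63]
 *   }
 */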
#ifndef __APPLE__
.text
.globl sp_4096_mul_d_64
.type sp_4096_mul_d_64,@function
.align 16
sp_4096_mul_d_64:
#else
.section __TEXT,__text
.globl _sp_4096_mul_d_64
.p2align 4
_sp_4096_mul_d_64:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 120(%rsi)
addq %rax, %r8
movq %r8, 120(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[16] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 128(%rsi)
addq %rax, %r9
movq %r9, 128(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[17] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 136(%rsi)
addq %rax, %r10
movq %r10, 136(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[18] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 144(%rsi)
addq %rax, %r8
movq %r8, 144(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[19] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 152(%rsi)
addq %rax, %r9
movq %r9, 152(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[20] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 160(%rsi)
addq %rax, %r10
movq %r10, 160(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[21] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 168(%rsi)
addq %rax, %r8
movq %r8, 168(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[22] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 176(%rsi)
addq %rax, %r9
movq %r9, 176(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[23] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 184(%rsi)
addq %rax, %r10
movq %r10, 184(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[24] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 192(%rsi)
addq %rax, %r8
movq %r8, 192(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[25] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 200(%rsi)
addq %rax, %r9
movq %r9, 200(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[26] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 208(%rsi)
addq %rax, %r10
movq %r10, 208(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[27] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 216(%rsi)
addq %rax, %r8
movq %r8, 216(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[28] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 224(%rsi)
addq %rax, %r9
movq %r9, 224(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[29] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 232(%rsi)
addq %rax, %r10
movq %r10, 232(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[30] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 240(%rsi)
addq %rax, %r8
movq %r8, 240(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[31] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 248(%rsi)
addq %rax, %r9
movq %r9, 248(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[32] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 256(%rsi)
addq %rax, %r10
movq %r10, 256(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[33] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 264(%rsi)
addq %rax, %r8
movq %r8, 264(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[34] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 272(%rsi)
addq %rax, %r9
movq %r9, 272(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[35] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 280(%rsi)
addq %rax, %r10
movq %r10, 280(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[36] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 288(%rsi)
addq %rax, %r8
movq %r8, 288(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[37] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 296(%rsi)
addq %rax, %r9
movq %r9, 296(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[38] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 304(%rsi)
addq %rax, %r10
movq %r10, 304(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[39] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 312(%rsi)
addq %rax, %r8
movq %r8, 312(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[40] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 320(%rsi)
addq %rax, %r9
movq %r9, 320(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[41] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 328(%rsi)
addq %rax, %r10
movq %r10, 328(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[42] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 336(%rsi)
addq %rax, %r8
movq %r8, 336(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[43] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 344(%rsi)
addq %rax, %r9
movq %r9, 344(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[44] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 352(%rsi)
addq %rax, %r10
movq %r10, 352(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[45] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 360(%rsi)
addq %rax, %r8
movq %r8, 360(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[46] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 368(%rsi)
addq %rax, %r9
movq %r9, 368(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[47] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 376(%rsi)
addq %rax, %r10
movq %r10, 376(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[48] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 384(%rsi)
addq %rax, %r8
movq %r8, 384(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[49] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 392(%rsi)
addq %rax, %r9
movq %r9, 392(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[50] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 400(%rsi)
addq %rax, %r10
movq %r10, 400(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[51] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 408(%rsi)
addq %rax, %r8
movq %r8, 408(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[52] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 416(%rsi)
addq %rax, %r9
movq %r9, 416(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[53] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 424(%rsi)
addq %rax, %r10
movq %r10, 424(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[54] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 432(%rsi)
addq %rax, %r8
movq %r8, 432(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[55] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 440(%rsi)
addq %rax, %r9
movq %r9, 440(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[56] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 448(%rsi)
addq %rax, %r10
movq %r10, 448(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[57] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 456(%rsi)
addq %rax, %r8
movq %r8, 456(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[58] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 464(%rsi)
addq %rax, %r9
movq %r9, 464(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[59] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 472(%rsi)
addq %rax, %r10
movq %r10, 472(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[60] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 480(%rsi)
addq %rax, %r8
movq %r8, 480(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[61] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 488(%rsi)
addq %rax, %r9
movq %r9, 488(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[62] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 496(%rsi)
addq %rax, %r10
movq %r10, 496(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[63] * B
movq %rcx, %rax
mulq 504(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 504(%rdi)
movq %r9, 512(%rdi)
repz retq
#ifndef __APPLE__
.size sp_4096_mul_d_64,.-sp_4096_mul_d_64
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 to leave a unchanged.
 *
 * r A single precision number representing the conditional subtraction result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
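/* Editor's sketch (not generated code): a C model of the routine below; the
 * _ref name is illustrative. The asm first streams b & m through a stack
 * buffer, then runs a single subq/sbbq borrow chain over the 64 digits.
 *
 *   #include <stdint.h>
 *
 *   static uint64_t sp_4096_cond_sub_64_ref(uint64_t* r, const uint64_t* a,
 *                                           const uint64_t* b, uint64_t m)
 *   {
 *       uint64_t borrow = 0;
 *       for (int i = 0; i < 64; i++) {
 *           uint64_t bi = b[i] & m;      // masked digit: 0 when m == 0
 *           uint64_t t  = a[i] - bi;     // digit subtract
 *           uint64_t b1 = (t > a[i]);    // borrow from the digit subtract
 *           r[i] = t - borrow;           // apply the incoming borrow
 *           borrow = b1 | (r[i] > t);    // at most one of these can be set
 *       }
 *       return 0 - borrow;               // 0 or ~0, like `sbbq %rax, %rax`
 *   }
 */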
#ifndef __APPLE__
.text
.globl sp_4096_cond_sub_64
.type sp_4096_cond_sub_64,@function
.align 16
sp_4096_cond_sub_64:
#else
.section __TEXT,__text
.globl _sp_4096_cond_sub_64
.p2align 4
_sp_4096_cond_sub_64:
#endif /* __APPLE__ */
subq $0x200, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq 192(%rdx), %r8
movq 200(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 192(%rsp)
movq %r9, 200(%rsp)
movq 208(%rdx), %r8
movq 216(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 208(%rsp)
movq %r9, 216(%rsp)
movq 224(%rdx), %r8
movq 232(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 224(%rsp)
movq %r9, 232(%rsp)
movq 240(%rdx), %r8
movq 248(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 240(%rsp)
movq %r9, 248(%rsp)
movq 256(%rdx), %r8
movq 264(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 256(%rsp)
movq %r9, 264(%rsp)
movq 272(%rdx), %r8
movq 280(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 272(%rsp)
movq %r9, 280(%rsp)
movq 288(%rdx), %r8
movq 296(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 288(%rsp)
movq %r9, 296(%rsp)
movq 304(%rdx), %r8
movq 312(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 304(%rsp)
movq %r9, 312(%rsp)
movq 320(%rdx), %r8
movq 328(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 320(%rsp)
movq %r9, 328(%rsp)
movq 336(%rdx), %r8
movq 344(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 336(%rsp)
movq %r9, 344(%rsp)
movq 352(%rdx), %r8
movq 360(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 352(%rsp)
movq %r9, 360(%rsp)
movq 368(%rdx), %r8
movq 376(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 368(%rsp)
movq %r9, 376(%rsp)
movq 384(%rdx), %r8
movq 392(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 384(%rsp)
movq %r9, 392(%rsp)
movq 400(%rdx), %r8
movq 408(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 400(%rsp)
movq %r9, 408(%rsp)
movq 416(%rdx), %r8
movq 424(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 416(%rsp)
movq %r9, 424(%rsp)
movq 432(%rdx), %r8
movq 440(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 432(%rsp)
movq %r9, 440(%rsp)
movq 448(%rdx), %r8
movq 456(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 448(%rsp)
movq %r9, 456(%rsp)
movq 464(%rdx), %r8
movq 472(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 464(%rsp)
movq %r9, 472(%rsp)
movq 480(%rdx), %r8
movq 488(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 480(%rsp)
movq %r9, 488(%rsp)
movq 496(%rdx), %r8
movq 504(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 496(%rsp)
movq %r9, 504(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 176(%rdi)
movq 192(%rsi), %r8
movq 192(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 184(%rdi)
movq 200(%rsi), %r9
movq 200(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 192(%rdi)
movq 208(%rsi), %r8
movq 208(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 200(%rdi)
movq 216(%rsi), %r9
movq 216(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 208(%rdi)
movq 224(%rsi), %r8
movq 224(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 216(%rdi)
movq 232(%rsi), %r9
movq 232(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 224(%rdi)
movq 240(%rsi), %r8
movq 240(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 232(%rdi)
movq 248(%rsi), %r9
movq 248(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 240(%rdi)
movq 256(%rsi), %r8
movq 256(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 248(%rdi)
movq 264(%rsi), %r9
movq 264(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 256(%rdi)
movq 272(%rsi), %r8
movq 272(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 264(%rdi)
movq 280(%rsi), %r9
movq 280(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 272(%rdi)
movq 288(%rsi), %r8
movq 288(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 280(%rdi)
movq 296(%rsi), %r9
movq 296(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 288(%rdi)
movq 304(%rsi), %r8
movq 304(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 296(%rdi)
movq 312(%rsi), %r9
movq 312(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 304(%rdi)
movq 320(%rsi), %r8
movq 320(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 312(%rdi)
movq 328(%rsi), %r9
movq 328(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 320(%rdi)
movq 336(%rsi), %r8
movq 336(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 328(%rdi)
movq 344(%rsi), %r9
movq 344(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 336(%rdi)
movq 352(%rsi), %r8
movq 352(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 344(%rdi)
movq 360(%rsi), %r9
movq 360(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 352(%rdi)
movq 368(%rsi), %r8
movq 368(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 360(%rdi)
movq 376(%rsi), %r9
movq 376(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 368(%rdi)
movq 384(%rsi), %r8
movq 384(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 376(%rdi)
movq 392(%rsi), %r9
movq 392(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 384(%rdi)
movq 400(%rsi), %r8
movq 400(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 392(%rdi)
movq 408(%rsi), %r9
movq 408(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 400(%rdi)
movq 416(%rsi), %r8
movq 416(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 408(%rdi)
movq 424(%rsi), %r9
movq 424(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 416(%rdi)
movq 432(%rsi), %r8
movq 432(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 424(%rdi)
movq 440(%rsi), %r9
movq 440(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 432(%rdi)
movq 448(%rsi), %r8
movq 448(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 440(%rdi)
movq 456(%rsi), %r9
movq 456(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 448(%rdi)
movq 464(%rsi), %r8
movq 464(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 456(%rdi)
movq 472(%rsi), %r9
movq 472(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 464(%rdi)
movq 480(%rsi), %r8
movq 480(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 472(%rdi)
movq 488(%rsi), %r9
movq 488(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 480(%rdi)
movq 496(%rsi), %r8
movq 496(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 488(%rdi)
movq 504(%rsi), %r9
movq 504(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 496(%rdi)
movq %r9, 504(%rdi)
sbbq %rax, %rax
addq $0x200, %rsp
repz retq
#ifndef __APPLE__
.size sp_4096_cond_sub_64,.-sp_4096_cond_sub_64
#endif /* __APPLE__ */
/* Reduce the number back to 4096 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64.
*/
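/* Editor's sketch (not generated code): word-wise Montgomery reduction in C.
 * Names are illustrative; cond_sub stands for the sp_4096_cond_sub_64 call
 * the asm makes at the end. Since mp == -(m[0]^-1) mod 2^64, each mu makes
 * digit a[i] cancel, so after 64 passes the result sits in a[64..127].
 *
 *   #include <stdint.h>
 *
 *   extern uint64_t cond_sub(uint64_t* r, const uint64_t* a,
 *                            const uint64_t* b, uint64_t m);
 *
 *   static void mont_reduce_64_ref(uint64_t* a, const uint64_t* m, uint64_t mp)
 *   {
 *       uint64_t over = 0;                     // carry beyond a[i + 64]
 *       for (int i = 0; i < 64; i++) {
 *           uint64_t mu = a[i] * mp;           // a[i] + mu*m[0] == 0 mod 2^64
 *           unsigned __int128 t = 0;
 *           for (int j = 0; j < 64; j++) {
 *               t += (unsigned __int128)mu * m[j] + a[i + j];
 *               a[i + j] = (uint64_t)t;
 *               t >>= 64;
 *           }
 *           t += (unsigned __int128)a[i + 64] + over;
 *           a[i + 64] = (uint64_t)t;
 *           over = (uint64_t)(t >> 64);        // tracked in %r15 below
 *       }
 *       cond_sub(a, a + 64, m, 0 - over);      // copy down; subtract m once
 *   }                                          // when the carry overflowed
 */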
#ifndef __APPLE__
.text
.globl sp_4096_mont_reduce_64
.type sp_4096_mont_reduce_64,@function
.align 16
sp_4096_mont_reduce_64:
#else
.section __TEXT,__text
.globl _sp_4096_mont_reduce_64
.p2align 4
_sp_4096_mont_reduce_64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 64
movq $0x40, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_4096_mont_reduce_64_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 120(%rdi)
adcq $0x00, %r9
# a[i+16] += m[16] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 128(%rsi)
movq 128(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 128(%rdi)
adcq $0x00, %r10
# a[i+17] += m[17] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 136(%rsi)
movq 136(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 136(%rdi)
adcq $0x00, %r9
# a[i+18] += m[18] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 144(%rsi)
movq 144(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 144(%rdi)
adcq $0x00, %r10
# a[i+19] += m[19] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 152(%rsi)
movq 152(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 152(%rdi)
adcq $0x00, %r9
# a[i+20] += m[20] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 160(%rsi)
movq 160(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 160(%rdi)
adcq $0x00, %r10
# a[i+21] += m[21] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 168(%rsi)
movq 168(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 168(%rdi)
adcq $0x00, %r9
# a[i+22] += m[22] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 176(%rsi)
movq 176(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 176(%rdi)
adcq $0x00, %r10
# a[i+23] += m[23] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 184(%rsi)
movq 184(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 184(%rdi)
adcq $0x00, %r9
# a[i+24] += m[24] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 192(%rsi)
movq 192(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 192(%rdi)
adcq $0x00, %r10
# a[i+25] += m[25] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 200(%rsi)
movq 200(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 200(%rdi)
adcq $0x00, %r9
# a[i+26] += m[26] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 208(%rsi)
movq 208(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 208(%rdi)
adcq $0x00, %r10
# a[i+27] += m[27] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 216(%rsi)
movq 216(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 216(%rdi)
adcq $0x00, %r9
# a[i+28] += m[28] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 224(%rsi)
movq 224(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 224(%rdi)
adcq $0x00, %r10
# a[i+29] += m[29] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 232(%rsi)
movq 232(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 232(%rdi)
adcq $0x00, %r9
# a[i+30] += m[30] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 240(%rsi)
movq 240(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 240(%rdi)
adcq $0x00, %r10
# a[i+31] += m[31] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 248(%rsi)
movq 248(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 248(%rdi)
adcq $0x00, %r9
# a[i+32] += m[32] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 256(%rsi)
movq 256(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 256(%rdi)
adcq $0x00, %r10
# a[i+33] += m[33] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 264(%rsi)
movq 264(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 264(%rdi)
adcq $0x00, %r9
# a[i+34] += m[34] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 272(%rsi)
movq 272(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 272(%rdi)
adcq $0x00, %r10
# a[i+35] += m[35] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 280(%rsi)
movq 280(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 280(%rdi)
adcq $0x00, %r9
# a[i+36] += m[36] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 288(%rsi)
movq 288(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 288(%rdi)
adcq $0x00, %r10
# a[i+37] += m[37] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 296(%rsi)
movq 296(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 296(%rdi)
adcq $0x00, %r9
# a[i+38] += m[38] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 304(%rsi)
movq 304(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 304(%rdi)
adcq $0x00, %r10
# a[i+39] += m[39] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 312(%rsi)
movq 312(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 312(%rdi)
adcq $0x00, %r9
# a[i+40] += m[40] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 320(%rsi)
movq 320(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 320(%rdi)
adcq $0x00, %r10
# a[i+41] += m[41] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 328(%rsi)
movq 328(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 328(%rdi)
adcq $0x00, %r9
# a[i+42] += m[42] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 336(%rsi)
movq 336(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 336(%rdi)
adcq $0x00, %r10
# a[i+43] += m[43] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 344(%rsi)
movq 344(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 344(%rdi)
adcq $0x00, %r9
# a[i+44] += m[44] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 352(%rsi)
movq 352(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 352(%rdi)
adcq $0x00, %r10
# a[i+45] += m[45] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 360(%rsi)
movq 360(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 360(%rdi)
adcq $0x00, %r9
# a[i+46] += m[46] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 368(%rsi)
movq 368(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 368(%rdi)
adcq $0x00, %r10
# a[i+47] += m[47] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 376(%rsi)
movq 376(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 376(%rdi)
adcq $0x00, %r9
# a[i+48] += m[48] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 384(%rsi)
movq 384(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 384(%rdi)
adcq $0x00, %r10
# a[i+49] += m[49] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 392(%rsi)
movq 392(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 392(%rdi)
adcq $0x00, %r9
# a[i+50] += m[50] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 400(%rsi)
movq 400(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 400(%rdi)
adcq $0x00, %r10
# a[i+51] += m[51] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 408(%rsi)
movq 408(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 408(%rdi)
adcq $0x00, %r9
# a[i+52] += m[52] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 416(%rsi)
movq 416(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 416(%rdi)
adcq $0x00, %r10
# a[i+53] += m[53] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 424(%rsi)
movq 424(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 424(%rdi)
adcq $0x00, %r9
# a[i+54] += m[54] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 432(%rsi)
movq 432(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 432(%rdi)
adcq $0x00, %r10
# a[i+55] += m[55] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 440(%rsi)
movq 440(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 440(%rdi)
adcq $0x00, %r9
# a[i+56] += m[56] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 448(%rsi)
movq 448(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 448(%rdi)
adcq $0x00, %r10
# a[i+57] += m[57] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 456(%rsi)
movq 456(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 456(%rdi)
adcq $0x00, %r9
# a[i+58] += m[58] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 464(%rsi)
movq 464(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 464(%rdi)
adcq $0x00, %r10
# a[i+59] += m[59] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 472(%rsi)
movq 472(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 472(%rdi)
adcq $0x00, %r9
# a[i+60] += m[60] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 480(%rsi)
movq 480(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 480(%rdi)
adcq $0x00, %r10
# a[i+61] += m[61] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 488(%rsi)
movq 488(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 488(%rdi)
adcq $0x00, %r9
# a[i+62] += m[62] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 496(%rsi)
movq 496(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 496(%rdi)
adcq $0x00, %r10
# a[i+63] += m[63] * mu
movq %r11, %rax
mulq 504(%rsi)
movq 504(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 504(%rdi)
adcq %rdx, 512(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_4096_mont_reduce_64_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
subq $0x200, %rdi
#ifndef __APPLE__
callq sp_4096_cond_sub_64@plt
#else
callq _sp_4096_cond_sub_64
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_4096_mont_reduce_64,.-sp_4096_mont_reduce_64
#endif /* __APPLE__ */
/* Sub b from a into r. (r = a - b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
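/* Editor's sketch (not generated code): the subtraction below in C, using a
 * signed 128-bit accumulator so the arithmetic shift carries the borrow; the
 * _ref name is illustrative.
 *
 *   #include <stdint.h>
 *
 *   static uint64_t sp_4096_sub_64_ref(uint64_t* r, const uint64_t* a,
 *                                      const uint64_t* b)
 *   {
 *       __int128 t = 0;                  // borrow rides in the high bits
 *       for (int i = 0; i < 64; i++) {
 *           t += (__int128)a[i] - b[i];  // digit difference plus borrow in
 *           r[i] = (uint64_t)t;
 *           t >>= 64;                    // arithmetic shift: 0 or -1
 *       }
 *       return (uint64_t)t;              // 0 if a >= b, else ~0, matching
 *   }                                    // the trailing `sbbq %rax, %rax`
 */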
#ifndef __APPLE__
.text
.globl sp_4096_sub_64
.type sp_4096_sub_64,@function
.align 16
sp_4096_sub_64:
#else
.section __TEXT,__text
.globl _sp_4096_sub_64
.p2align 4
_sp_4096_sub_64:
#endif /* __APPLE__ */
movq (%rsi), %rcx
subq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
sbbq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
sbbq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
sbbq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
sbbq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
sbbq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
sbbq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
sbbq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
sbbq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
sbbq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
sbbq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
sbbq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
sbbq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
sbbq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
sbbq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
sbbq 120(%rdx), %r8
movq 128(%rsi), %rcx
movq %r8, 120(%rdi)
sbbq 128(%rdx), %rcx
movq 136(%rsi), %r8
movq %rcx, 128(%rdi)
sbbq 136(%rdx), %r8
movq 144(%rsi), %rcx
movq %r8, 136(%rdi)
sbbq 144(%rdx), %rcx
movq 152(%rsi), %r8
movq %rcx, 144(%rdi)
sbbq 152(%rdx), %r8
movq 160(%rsi), %rcx
movq %r8, 152(%rdi)
sbbq 160(%rdx), %rcx
movq 168(%rsi), %r8
movq %rcx, 160(%rdi)
sbbq 168(%rdx), %r8
movq 176(%rsi), %rcx
movq %r8, 168(%rdi)
sbbq 176(%rdx), %rcx
movq 184(%rsi), %r8
movq %rcx, 176(%rdi)
sbbq 184(%rdx), %r8
movq 192(%rsi), %rcx
movq %r8, 184(%rdi)
sbbq 192(%rdx), %rcx
movq 200(%rsi), %r8
movq %rcx, 192(%rdi)
sbbq 200(%rdx), %r8
movq 208(%rsi), %rcx
movq %r8, 200(%rdi)
sbbq 208(%rdx), %rcx
movq 216(%rsi), %r8
movq %rcx, 208(%rdi)
sbbq 216(%rdx), %r8
movq 224(%rsi), %rcx
movq %r8, 216(%rdi)
sbbq 224(%rdx), %rcx
movq 232(%rsi), %r8
movq %rcx, 224(%rdi)
sbbq 232(%rdx), %r8
movq 240(%rsi), %rcx
movq %r8, 232(%rdi)
sbbq 240(%rdx), %rcx
movq 248(%rsi), %r8
movq %rcx, 240(%rdi)
sbbq 248(%rdx), %r8
movq 256(%rsi), %rcx
movq %r8, 248(%rdi)
sbbq 256(%rdx), %rcx
movq 264(%rsi), %r8
movq %rcx, 256(%rdi)
sbbq 264(%rdx), %r8
movq 272(%rsi), %rcx
movq %r8, 264(%rdi)
sbbq 272(%rdx), %rcx
movq 280(%rsi), %r8
movq %rcx, 272(%rdi)
sbbq 280(%rdx), %r8
movq 288(%rsi), %rcx
movq %r8, 280(%rdi)
sbbq 288(%rdx), %rcx
movq 296(%rsi), %r8
movq %rcx, 288(%rdi)
sbbq 296(%rdx), %r8
movq 304(%rsi), %rcx
movq %r8, 296(%rdi)
sbbq 304(%rdx), %rcx
movq 312(%rsi), %r8
movq %rcx, 304(%rdi)
sbbq 312(%rdx), %r8
movq 320(%rsi), %rcx
movq %r8, 312(%rdi)
sbbq 320(%rdx), %rcx
movq 328(%rsi), %r8
movq %rcx, 320(%rdi)
sbbq 328(%rdx), %r8
movq 336(%rsi), %rcx
movq %r8, 328(%rdi)
sbbq 336(%rdx), %rcx
movq 344(%rsi), %r8
movq %rcx, 336(%rdi)
sbbq 344(%rdx), %r8
movq 352(%rsi), %rcx
movq %r8, 344(%rdi)
sbbq 352(%rdx), %rcx
movq 360(%rsi), %r8
movq %rcx, 352(%rdi)
sbbq 360(%rdx), %r8
movq 368(%rsi), %rcx
movq %r8, 360(%rdi)
sbbq 368(%rdx), %rcx
movq 376(%rsi), %r8
movq %rcx, 368(%rdi)
sbbq 376(%rdx), %r8
movq 384(%rsi), %rcx
movq %r8, 376(%rdi)
sbbq 384(%rdx), %rcx
movq 392(%rsi), %r8
movq %rcx, 384(%rdi)
sbbq 392(%rdx), %r8
movq 400(%rsi), %rcx
movq %r8, 392(%rdi)
sbbq 400(%rdx), %rcx
movq 408(%rsi), %r8
movq %rcx, 400(%rdi)
sbbq 408(%rdx), %r8
movq 416(%rsi), %rcx
movq %r8, 408(%rdi)
sbbq 416(%rdx), %rcx
movq 424(%rsi), %r8
movq %rcx, 416(%rdi)
sbbq 424(%rdx), %r8
movq 432(%rsi), %rcx
movq %r8, 424(%rdi)
sbbq 432(%rdx), %rcx
movq 440(%rsi), %r8
movq %rcx, 432(%rdi)
sbbq 440(%rdx), %r8
movq 448(%rsi), %rcx
movq %r8, 440(%rdi)
sbbq 448(%rdx), %rcx
movq 456(%rsi), %r8
movq %rcx, 448(%rdi)
sbbq 456(%rdx), %r8
movq 464(%rsi), %rcx
movq %r8, 456(%rdi)
sbbq 464(%rdx), %rcx
movq 472(%rsi), %r8
movq %rcx, 464(%rdi)
sbbq 472(%rdx), %r8
movq 480(%rsi), %rcx
movq %r8, 472(%rdi)
sbbq 480(%rdx), %rcx
movq 488(%rsi), %r8
movq %rcx, 480(%rdi)
sbbq 488(%rdx), %r8
movq 496(%rsi), %rcx
movq %r8, 488(%rdi)
sbbq 496(%rdx), %rcx
movq 504(%rsi), %r8
movq %rcx, 496(%rdi)
sbbq 504(%rdx), %r8
movq %r8, 504(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_sub_64,.-sp_4096_sub_64
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
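/* Editor's sketch (not generated code): the mulx/adcx/adox pattern below in
 * intrinsics form. mulxq produces hi:lo without touching flags, adcxq adds
 * through CF only and adoxq through OF only, so two carry chains interleave
 * with no flag save/restore. Modelled here with _addcarry_u64 (the compiler
 * need not emit the exact same instructions); the _ref name is illustrative
 * and GCC/Clang with -mbmi2 is assumed for _mulx_u64.
 *
 *   #include <stdint.h>
 *   #include <immintrin.h>
 *
 *   static void sp_4096_mul_d_avx2_64_ref(uint64_t* r, const uint64_t* a,
 *                                         uint64_t b)
 *   {
 *       unsigned long long lo, hi, cur = 0, nxt;
 *       unsigned char cf = 0, of = 0;
 *       for (int i = 0; i < 64; i++) {
 *           lo = _mulx_u64(a[i], b, &hi);           // hi:lo = a[i] * b
 *           cf = _addcarry_u64(cf, cur, lo, &cur);  // CF chain (adcxq)
 *           of = _addcarry_u64(of, hi, 0, &nxt);    // OF chain (adoxq)
 *           r[i] = (uint64_t)cur;
 *           cur = nxt;
 *       }
 *       (void)_addcarry_u64(cf, cur, 0, &cur);      // final adcxq %r11, %r9
 *       r[64] = (uint64_t)cur;
 *   }
 */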
#ifndef __APPLE__
.text
.globl sp_4096_mul_d_avx2_64
.type sp_4096_mul_d_avx2_64,@function
.align 16
sp_4096_mul_d_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_mul_d_avx2_64
.p2align 4
_sp_4096_mul_d_avx2_64:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 120(%rdi)
# A[16] * B
mulxq 128(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 128(%rdi)
# A[17] * B
mulxq 136(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 136(%rdi)
# A[18] * B
mulxq 144(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 144(%rdi)
# A[19] * B
mulxq 152(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 152(%rdi)
# A[20] * B
mulxq 160(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 160(%rdi)
# A[21] * B
mulxq 168(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 168(%rdi)
# A[22] * B
mulxq 176(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 176(%rdi)
# A[23] * B
mulxq 184(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 184(%rdi)
# A[24] * B
mulxq 192(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 192(%rdi)
# A[25] * B
mulxq 200(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 200(%rdi)
# A[26] * B
mulxq 208(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 208(%rdi)
# A[27] * B
mulxq 216(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 216(%rdi)
# A[28] * B
mulxq 224(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 224(%rdi)
# A[29] * B
mulxq 232(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 232(%rdi)
# A[30] * B
mulxq 240(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 240(%rdi)
# A[31] * B
mulxq 248(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 248(%rdi)
# A[32] * B
mulxq 256(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 256(%rdi)
# A[33] * B
mulxq 264(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 264(%rdi)
# A[34] * B
mulxq 272(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 272(%rdi)
# A[35] * B
mulxq 280(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 280(%rdi)
# A[36] * B
mulxq 288(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 288(%rdi)
# A[37] * B
mulxq 296(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 296(%rdi)
# A[38] * B
mulxq 304(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 304(%rdi)
# A[39] * B
mulxq 312(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 312(%rdi)
# A[40] * B
mulxq 320(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 320(%rdi)
# A[41] * B
mulxq 328(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 328(%rdi)
# A[42] * B
mulxq 336(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 336(%rdi)
# A[43] * B
mulxq 344(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 344(%rdi)
# A[44] * B
mulxq 352(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 352(%rdi)
# A[45] * B
mulxq 360(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 360(%rdi)
# A[46] * B
mulxq 368(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 368(%rdi)
# A[47] * B
mulxq 376(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 376(%rdi)
# A[48] * B
mulxq 384(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 384(%rdi)
# A[49] * B
mulxq 392(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 392(%rdi)
# A[50] * B
mulxq 400(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 400(%rdi)
# A[51] * B
mulxq 408(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 408(%rdi)
# A[52] * B
mulxq 416(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 416(%rdi)
# A[53] * B
mulxq 424(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 424(%rdi)
# A[54] * B
mulxq 432(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 432(%rdi)
# A[55] * B
mulxq 440(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 440(%rdi)
# A[56] * B
mulxq 448(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 448(%rdi)
# A[57] * B
mulxq 456(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 456(%rdi)
# A[58] * B
mulxq 464(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 464(%rdi)
# A[59] * B
mulxq 472(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 472(%rdi)
# A[60] * B
mulxq 480(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 480(%rdi)
# A[61] * B
mulxq 488(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 488(%rdi)
# A[62] * B
mulxq 496(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 496(%rdi)
# A[63] * B
mulxq 504(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 504(%rdi)
movq %r9, 512(%rdi)
repz retq
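# (repz retq encodes the two-byte "rep ret" idiom, commonly used so that
# older AMD branch predictors do not mispredict a single-byte ret.)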
#ifndef __APPLE__
.size sp_4096_mul_d_avx2_64,.-sp_4096_mul_d_avx2_64
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
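/* For reference, a rough C equivalent of the word-by-word carry chain in
 * sp_4096_mul_d_avx2_64 above (an illustrative sketch only; it assumes
 * 64-bit limbs and compiler support for unsigned __int128, and the
 * function and parameter names are hypothetical, not part of this file):
 *
 *   void mul_d_64(uint64_t r[65], const uint64_t a[64], uint64_t b)
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 64; i++) {
 *           t += (unsigned __int128)a[i] * b;   // mulxq
 *           r[i] = (uint64_t)t;                 // movq ..., N(%rdi)
 *           t >>= 64;                           // carry, via adcxq/adoxq
 *       }
 *       r[64] = (uint64_t)t;                    // final carry limb
 *   }
 *
 * The assembly gets the same effect branch-free by interleaving two carry
 * chains (CF via adcxq, OF via adoxq) so adjacent multiplies can overlap.
 */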
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor (the number to divide by).
 * returns the 64-bit quotient of the division.
 */
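/* A minimal C sketch of the divq sequence below (illustrative only; like
 * divq itself it assumes d1 < div, since otherwise the quotient would not
 * fit in 64 bits and the hardware raises a divide error):
 *
 *   uint64_t div_word(uint64_t d1, uint64_t d0, uint64_t div)
 *   {
 *       unsigned __int128 n = ((unsigned __int128)d1 << 64) | d0;
 *       return (uint64_t)(n / div);   // quotient, left in %rax by divq
 *   }
 */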
#ifndef __APPLE__
.text
.globl div_4096_word_asm_64
.type div_4096_word_asm_64,@function
.align 16
div_4096_word_asm_64:
#else
.section __TEXT,__text
.globl _div_4096_word_asm_64
.p2align 4
_div_4096_word_asm_64:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_4096_word_asm_64,.-div_4096_word_asm_64
#endif /* __APPLE__ */
#endif /* _WIN64 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
 * a A single precision number to subtract from.
 * b A single precision number to subtract.
 * m Mask value to apply.
 */
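/* A rough C sketch of the branch-free pattern used below (illustrative
 * only; the names are hypothetical). pextq with an all-ones selector
 * passes b[i] through unchanged and with a zero selector yields 0, so the
 * sbbq borrow chain always runs regardless of m, and the returned value
 * mirrors the final sbbq %rax, %rax (0 or all-ones):
 *
 *   uint64_t cond_sub_64(uint64_t r[64], const uint64_t a[64],
 *                        const uint64_t b[64], uint64_t m)
 *   {
 *       uint64_t borrow = 0;
 *       for (int i = 0; i < 64; i++) {
 *           uint64_t bi = b[i] & m;                    // pextq masking
 *           uint64_t d  = a[i] - bi - borrow;
 *           borrow = (a[i] < bi) || ((a[i] - bi) < borrow);
 *           r[i] = d;                                  // movq ..., N(%rdi)
 *       }
 *       return (uint64_t)0 - borrow;                   // 0 or all-ones
 *   }
 */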
#ifndef __APPLE__
.text
.globl sp_4096_cond_sub_avx2_64
.type sp_4096_cond_sub_avx2_64,@function
.align 16
sp_4096_cond_sub_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_cond_sub_avx2_64
.p2align 4
_sp_4096_cond_sub_avx2_64:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
sbbq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
sbbq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
sbbq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
sbbq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
sbbq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
sbbq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
sbbq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
sbbq %r8, %r10
movq 192(%rdx), %r9
movq 192(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 184(%rdi)
sbbq %r9, %r8
movq 200(%rdx), %r10
movq 200(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 192(%rdi)
sbbq %r10, %r9
movq 208(%rdx), %r8
movq 208(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 200(%rdi)
sbbq %r8, %r10
movq 216(%rdx), %r9
movq 216(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 208(%rdi)
sbbq %r9, %r8
movq 224(%rdx), %r10
movq 224(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 216(%rdi)
sbbq %r10, %r9
movq 232(%rdx), %r8
movq 232(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 224(%rdi)
sbbq %r8, %r10
movq 240(%rdx), %r9
movq 240(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 232(%rdi)
sbbq %r9, %r8
movq 248(%rdx), %r10
movq 248(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 240(%rdi)
sbbq %r10, %r9
movq 256(%rdx), %r8
movq 256(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 248(%rdi)
sbbq %r8, %r10
movq 264(%rdx), %r9
movq 264(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 256(%rdi)
sbbq %r9, %r8
movq 272(%rdx), %r10
movq 272(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 264(%rdi)
sbbq %r10, %r9
movq 280(%rdx), %r8
movq 280(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 272(%rdi)
sbbq %r8, %r10
movq 288(%rdx), %r9
movq 288(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 280(%rdi)
sbbq %r9, %r8
movq 296(%rdx), %r10
movq 296(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 288(%rdi)
sbbq %r10, %r9
movq 304(%rdx), %r8
movq 304(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 296(%rdi)
sbbq %r8, %r10
movq 312(%rdx), %r9
movq 312(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 304(%rdi)
sbbq %r9, %r8
movq 320(%rdx), %r10
movq 320(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 312(%rdi)
sbbq %r10, %r9
movq 328(%rdx), %r8
movq 328(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 320(%rdi)
sbbq %r8, %r10
movq 336(%rdx), %r9
movq 336(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 328(%rdi)
sbbq %r9, %r8
movq 344(%rdx), %r10
movq 344(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 336(%rdi)
sbbq %r10, %r9
movq 352(%rdx), %r8
movq 352(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 344(%rdi)
sbbq %r8, %r10
movq 360(%rdx), %r9
movq 360(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 352(%rdi)
sbbq %r9, %r8
movq 368(%rdx), %r10
movq 368(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 360(%rdi)
sbbq %r10, %r9
movq 376(%rdx), %r8
movq 376(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 368(%rdi)
sbbq %r8, %r10
movq 384(%rdx), %r9
movq 384(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 376(%rdi)
sbbq %r9, %r8
movq 392(%rdx), %r10
movq 392(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 384(%rdi)
sbbq %r10, %r9
movq 400(%rdx), %r8
movq 400(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 392(%rdi)
sbbq %r8, %r10
movq 408(%rdx), %r9
movq 408(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 400(%rdi)
sbbq %r9, %r8
movq 416(%rdx), %r10
movq 416(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 408(%rdi)
sbbq %r10, %r9
movq 424(%rdx), %r8
movq 424(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 416(%rdi)
sbbq %r8, %r10
movq 432(%rdx), %r9
movq 432(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 424(%rdi)
sbbq %r9, %r8
movq 440(%rdx), %r10
movq 440(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 432(%rdi)
sbbq %r10, %r9
movq 448(%rdx), %r8
movq 448(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 440(%rdi)
sbbq %r8, %r10
movq 456(%rdx), %r9
movq 456(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 448(%rdi)
sbbq %r9, %r8
movq 464(%rdx), %r10
movq 464(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 456(%rdi)
sbbq %r10, %r9
movq 472(%rdx), %r8
movq 472(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 464(%rdi)
sbbq %r8, %r10
movq 480(%rdx), %r9
movq 480(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 472(%rdi)
sbbq %r9, %r8
movq 488(%rdx), %r10
movq 488(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 480(%rdi)
sbbq %r10, %r9
movq 496(%rdx), %r8
movq 496(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 488(%rdi)
sbbq %r8, %r10
movq 504(%rdx), %r9
movq 504(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 496(%rdi)
sbbq %r9, %r8
movq %r8, 504(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_cond_sub_avx2_64,.-sp_4096_cond_sub_avx2_64
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
 * return -1, 0 or 1 if a is less than, equal to or greater than b
 * respectively.
*/
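/* A rough C model of the cmov chain below (illustrative only; the C 'if's
 * are written as branches for readability, whereas the assembly uses
 * cmova/cmovc/cmovnz so the instruction trace never depends on the data):
 *
 *   int64_t cmp_64(const uint64_t a[64], const uint64_t b[64])
 *   {
 *       int64_t  r = -1;
 *       uint64_t m = (uint64_t)-1;      // all-ones while limbs match
 *       for (int i = 63; i >= 0; i--) {
 *           uint64_t x = a[i] & m;      // masked to 0 after a mismatch
 *           uint64_t y = b[i] & m;
 *           if (x > y)  r = 1;          // cmova
 *           if (x < y)  r = (int64_t)m; // cmovc (m is still -1 here)
 *           if (x != y) m = 0;          // cmovnz: freeze later limbs
 *       }
 *       return r ^ (int64_t)m;          // -1 ^ -1 == 0 when fully equal
 *   }
 */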
#ifndef __APPLE__
.text
.globl sp_4096_cmp_64
.type sp_4096_cmp_64,@function
.align 16
sp_4096_cmp_64:
#else
.section __TEXT,__text
.globl _sp_4096_cmp_64
.p2align 4
_sp_4096_cmp_64:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 504(%rdi), %r9
movq 504(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 496(%rdi), %r9
movq 496(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 488(%rdi), %r9
movq 488(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 480(%rdi), %r9
movq 480(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 472(%rdi), %r9
movq 472(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 464(%rdi), %r9
movq 464(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 456(%rdi), %r9
movq 456(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 448(%rdi), %r9
movq 448(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 440(%rdi), %r9
movq 440(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 432(%rdi), %r9
movq 432(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 424(%rdi), %r9
movq 424(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 416(%rdi), %r9
movq 416(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 408(%rdi), %r9
movq 408(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 400(%rdi), %r9
movq 400(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 392(%rdi), %r9
movq 392(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 384(%rdi), %r9
movq 384(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 376(%rdi), %r9
movq 376(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 368(%rdi), %r9
movq 368(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 360(%rdi), %r9
movq 360(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 352(%rdi), %r9
movq 352(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 344(%rdi), %r9
movq 344(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 336(%rdi), %r9
movq 336(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 328(%rdi), %r9
movq 328(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 320(%rdi), %r9
movq 320(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 312(%rdi), %r9
movq 312(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 304(%rdi), %r9
movq 304(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 296(%rdi), %r9
movq 296(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 288(%rdi), %r9
movq 288(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 280(%rdi), %r9
movq 280(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 272(%rdi), %r9
movq 272(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 264(%rdi), %r9
movq 264(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 256(%rdi), %r9
movq 256(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 248(%rdi), %r9
movq 248(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 240(%rdi), %r9
movq 240(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 232(%rdi), %r9
movq 232(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 224(%rdi), %r9
movq 224(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 216(%rdi), %r9
movq 216(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 208(%rdi), %r9
movq 208(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 200(%rdi), %r9
movq 200(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 192(%rdi), %r9
movq 192(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 184(%rdi), %r9
movq 184(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 176(%rdi), %r9
movq 176(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 168(%rdi), %r9
movq 168(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 160(%rdi), %r9
movq 160(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 152(%rdi), %r9
movq 152(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 144(%rdi), %r9
movq 144(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 136(%rdi), %r9
movq 136(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 128(%rdi), %r9
movq 128(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_cmp_64,.-sp_4096_cmp_64
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
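/* Read one entry from a 16-entry table of 4096-bit numbers without leaking
 * the index through the memory access pattern: every entry is loaded on
 * every pass and the wanted one is selected with SSE2 compare masks
 * (pcmpeqd/pand/por), 64 bytes of each entry at a time.
 *
 * A rough scalar C sketch of the selection (illustrative only; the names
 * are hypothetical):
 *
 *   void get_from_table_64(uint64_t* r, const uint64_t** table, int idx)
 *   {
 *       for (int j = 0; j < 64; j++) {        // one limb of the result
 *           uint64_t t = 0;
 *           for (int e = 0; e < 16; e++)      // touch every entry
 *               t |= table[e][j] & (-(uint64_t)(e == idx));
 *           r[j] = t;                         // only entry 'idx' survives
 *       }
 *   }
 */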
#ifndef __APPLE__
.text
.globl sp_4096_get_from_table_64
.type sp_4096_get_from_table_64,@function
.align 16
sp_4096_get_from_table_64:
#else
.section __TEXT,__text
.globl _sp_4096_get_from_table_64
.p2align 4
_sp_4096_get_from_table_64:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
pxor %xmm13, %xmm13
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm10, %xmm10
# START: 0-7
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
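# xmm13 holds this entry's index and xmm10 the wanted index; pcmpeqd turns
# the comparison into an all-ones/zero mask so the entry is OR-ed into the
# xmm4-xmm7 accumulators only on a match, yet is always read from memory.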
movq (%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 0-7
# START: 8-15
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x40, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 8-15
# START: 16-23
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 16-23
# START: 24-31
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0xc0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 24-31
# START: 32-39
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x100, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 32-39
# START: 40-47
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x140, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 40-47
# START: 48-55
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x180, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
addq $0x40, %rdi
# END: 48-55
# START: 56-63
pxor %xmm13, %xmm13
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x1c0, %rcx
movdqu %xmm13, %xmm12
pcmpeqd %xmm10, %xmm12
movdqu (%rcx), %xmm0
movdqu 16(%rcx), %xmm1
movdqu 32(%rcx), %xmm2
movdqu 48(%rcx), %xmm3
pand %xmm12, %xmm0
pand %xmm12, %xmm1
pand %xmm12, %xmm2
pand %xmm12, %xmm3
por %xmm0, %xmm4
por %xmm1, %xmm5
por %xmm2, %xmm6
por %xmm3, %xmm7
paddd %xmm11, %xmm13
movdqu %xmm4, (%rdi)
movdqu %xmm5, 16(%rdi)
movdqu %xmm6, 32(%rdi)
movdqu %xmm7, 48(%rdi)
# END: 56-63
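        # Note: "repz retq" is a deliberate two-byte return (rep ret),
        # historically recommended to avoid a branch-prediction penalty
        # for a lone one-byte ret on some AMD processors.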
repz retq
#ifndef __APPLE__
.size sp_4096_get_from_table_64,.-sp_4096_get_from_table_64
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 4096 bits using Montgomery reduction.
 *
 * a A single precision number to reduce in place.
 * m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^n
 *    (here n = 64, the word size).
 */
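/* A rough word-level sketch of the loop below (illustrative C only, not
 * part of the build; the names are ours, not wolfSSL API):
 *
 *     for (i = 0; i < 64; i++) {
 *         mu = a[i] * mp;              // computed mod 2^64
 *         a[i..i+64] += mu * m;        // zeroes a[i], carries ripple up
 *     }
 *     // result is the top 64 words; subtract m once more if a carry
 *     // remains (done branch-free after the loop)
 */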
#ifndef __APPLE__
.text
.globl sp_4096_mont_reduce_avx2_64
.type sp_4096_mont_reduce_avx2_64,@function
.align 16
sp_4096_mont_reduce_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_mont_reduce_avx2_64
.p2align 4
_sp_4096_mont_reduce_avx2_64:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 64
movq $0x40, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0x100, %rdi
xorq %rbp, %rbp
L_4096_mont_reduce_avx2_64_loop:
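        # Each iteration computes mu = a[i] * mp (mulxq takes mu from %rdx
        # implicitly) and folds m * mu into a[i..i+64] using two
        # independent carry chains: adcxq updates only CF, adoxq only OF,
        # and the xorq %rbx, %rbx below clears both. a[i..i+3] stay cached
        # in %r12-%r15, and %rdi was biased by 0x100 above, likely so more
        # of the 512-byte window is reachable with 8-bit displacements.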
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -224(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -216(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -208(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -216(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -200(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -208(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq -192(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -200(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq -184(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -192(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq -176(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -184(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq -168(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -176(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq -160(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -168(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq -152(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -160(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq -144(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -152(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq -136(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -144(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq -128(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -136(%rdi)
# a[i+16] += m[16] * mu
mulxq 128(%rsi), %rax, %rcx
movq -120(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -128(%rdi)
# a[i+17] += m[17] * mu
mulxq 136(%rsi), %rax, %rcx
movq -112(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -120(%rdi)
# a[i+18] += m[18] * mu
mulxq 144(%rsi), %rax, %rcx
movq -104(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -112(%rdi)
# a[i+19] += m[19] * mu
mulxq 152(%rsi), %rax, %rcx
movq -96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -104(%rdi)
# a[i+20] += m[20] * mu
mulxq 160(%rsi), %rax, %rcx
movq -88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -96(%rdi)
# a[i+21] += m[21] * mu
mulxq 168(%rsi), %rax, %rcx
movq -80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -88(%rdi)
# a[i+22] += m[22] * mu
mulxq 176(%rsi), %rax, %rcx
movq -72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -80(%rdi)
# a[i+23] += m[23] * mu
mulxq 184(%rsi), %rax, %rcx
movq -64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -72(%rdi)
# a[i+24] += m[24] * mu
mulxq 192(%rsi), %rax, %rcx
movq -56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -64(%rdi)
# a[i+25] += m[25] * mu
mulxq 200(%rsi), %rax, %rcx
movq -48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -56(%rdi)
# a[i+26] += m[26] * mu
mulxq 208(%rsi), %rax, %rcx
movq -40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -48(%rdi)
# a[i+27] += m[27] * mu
mulxq 216(%rsi), %rax, %rcx
movq -32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -40(%rdi)
# a[i+28] += m[28] * mu
mulxq 224(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -32(%rdi)
# a[i+29] += m[29] * mu
mulxq 232(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+30] += m[30] * mu
mulxq 240(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+31] += m[31] * mu
mulxq 248(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+32] += m[32] * mu
mulxq 256(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+33] += m[33] * mu
mulxq 264(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+34] += m[34] * mu
mulxq 272(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+35] += m[35] * mu
mulxq 280(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+36] += m[36] * mu
mulxq 288(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+37] += m[37] * mu
mulxq 296(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+38] += m[38] * mu
mulxq 304(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+39] += m[39] * mu
mulxq 312(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
# a[i+40] += m[40] * mu
mulxq 320(%rsi), %rax, %rcx
movq 72(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 64(%rdi)
# a[i+41] += m[41] * mu
mulxq 328(%rsi), %rax, %rcx
movq 80(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 72(%rdi)
# a[i+42] += m[42] * mu
mulxq 336(%rsi), %rax, %rcx
movq 88(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 80(%rdi)
# a[i+43] += m[43] * mu
mulxq 344(%rsi), %rax, %rcx
movq 96(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 88(%rdi)
# a[i+44] += m[44] * mu
mulxq 352(%rsi), %rax, %rcx
movq 104(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rdi)
# a[i+45] += m[45] * mu
mulxq 360(%rsi), %rax, %rcx
movq 112(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 104(%rdi)
# a[i+46] += m[46] * mu
mulxq 368(%rsi), %rax, %rcx
movq 120(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 112(%rdi)
# a[i+47] += m[47] * mu
mulxq 376(%rsi), %rax, %rcx
movq 128(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 120(%rdi)
# a[i+48] += m[48] * mu
mulxq 384(%rsi), %rax, %rcx
movq 136(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 128(%rdi)
# a[i+49] += m[49] * mu
mulxq 392(%rsi), %rax, %rcx
movq 144(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 136(%rdi)
# a[i+50] += m[50] * mu
mulxq 400(%rsi), %rax, %rcx
movq 152(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 144(%rdi)
# a[i+51] += m[51] * mu
mulxq 408(%rsi), %rax, %rcx
movq 160(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 152(%rdi)
# a[i+52] += m[52] * mu
mulxq 416(%rsi), %rax, %rcx
movq 168(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 160(%rdi)
# a[i+53] += m[53] * mu
mulxq 424(%rsi), %rax, %rcx
movq 176(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 168(%rdi)
# a[i+54] += m[54] * mu
mulxq 432(%rsi), %rax, %rcx
movq 184(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 176(%rdi)
# a[i+55] += m[55] * mu
mulxq 440(%rsi), %rax, %rcx
movq 192(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 184(%rdi)
# a[i+56] += m[56] * mu
mulxq 448(%rsi), %rax, %rcx
movq 200(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 192(%rdi)
# a[i+57] += m[57] * mu
mulxq 456(%rsi), %rax, %rcx
movq 208(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 200(%rdi)
# a[i+58] += m[58] * mu
mulxq 464(%rsi), %rax, %rcx
movq 216(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 208(%rdi)
# a[i+59] += m[59] * mu
mulxq 472(%rsi), %rax, %rcx
movq 224(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 216(%rdi)
# a[i+60] += m[60] * mu
mulxq 480(%rsi), %rax, %rcx
movq 232(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 224(%rdi)
# a[i+61] += m[61] * mu
mulxq 488(%rsi), %rax, %rcx
movq 240(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 232(%rdi)
# a[i+62] += m[62] * mu
mulxq 496(%rsi), %rax, %rcx
movq 248(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 240(%rdi)
# a[i+63] += m[63] * mu
mulxq 504(%rsi), %rax, %rcx
movq 256(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 248(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 256(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 1
addq $8, %rdi
# i -= 1
subq $0x01, %r9
jnz L_4096_mont_reduce_avx2_64_loop
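        # The reduced value can still be one modulus too large. negq turns
        # the saved top-word carry in %rbp into an all-ones/zero mask, and
        # pextq against that mask yields either m[j] or 0, so m is
        # subtracted (or not) without a data-dependent branch.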
subq $0x100, %rdi
negq %rbp
movq %rdi, %r8
subq $0x200, %rdi
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq 128(%rsi), %rcx
movq 128(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 120(%rdi)
sbbq %rcx, %rax
movq 136(%rsi), %rdx
movq 136(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 128(%rdi)
sbbq %rdx, %rcx
movq 144(%rsi), %rax
movq 144(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 136(%rdi)
sbbq %rax, %rdx
movq 152(%rsi), %rcx
movq 152(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 144(%rdi)
sbbq %rcx, %rax
movq 160(%rsi), %rdx
movq 160(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 152(%rdi)
sbbq %rdx, %rcx
movq 168(%rsi), %rax
movq 168(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 160(%rdi)
sbbq %rax, %rdx
movq 176(%rsi), %rcx
movq 176(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 168(%rdi)
sbbq %rcx, %rax
movq 184(%rsi), %rdx
movq 184(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 176(%rdi)
sbbq %rdx, %rcx
movq 192(%rsi), %rax
movq 192(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 184(%rdi)
sbbq %rax, %rdx
movq 200(%rsi), %rcx
movq 200(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 192(%rdi)
sbbq %rcx, %rax
movq 208(%rsi), %rdx
movq 208(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 200(%rdi)
sbbq %rdx, %rcx
movq 216(%rsi), %rax
movq 216(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 208(%rdi)
sbbq %rax, %rdx
movq 224(%rsi), %rcx
movq 224(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 216(%rdi)
sbbq %rcx, %rax
movq 232(%rsi), %rdx
movq 232(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 224(%rdi)
sbbq %rdx, %rcx
movq 240(%rsi), %rax
movq 240(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 232(%rdi)
sbbq %rax, %rdx
movq 248(%rsi), %rcx
movq 248(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 240(%rdi)
sbbq %rcx, %rax
movq 256(%rsi), %rdx
movq 256(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 248(%rdi)
sbbq %rdx, %rcx
movq 264(%rsi), %rax
movq 264(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 256(%rdi)
sbbq %rax, %rdx
movq 272(%rsi), %rcx
movq 272(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 264(%rdi)
sbbq %rcx, %rax
movq 280(%rsi), %rdx
movq 280(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 272(%rdi)
sbbq %rdx, %rcx
movq 288(%rsi), %rax
movq 288(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 280(%rdi)
sbbq %rax, %rdx
movq 296(%rsi), %rcx
movq 296(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 288(%rdi)
sbbq %rcx, %rax
movq 304(%rsi), %rdx
movq 304(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 296(%rdi)
sbbq %rdx, %rcx
movq 312(%rsi), %rax
movq 312(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 304(%rdi)
sbbq %rax, %rdx
movq 320(%rsi), %rcx
movq 320(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 312(%rdi)
sbbq %rcx, %rax
movq 328(%rsi), %rdx
movq 328(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 320(%rdi)
sbbq %rdx, %rcx
movq 336(%rsi), %rax
movq 336(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 328(%rdi)
sbbq %rax, %rdx
movq 344(%rsi), %rcx
movq 344(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 336(%rdi)
sbbq %rcx, %rax
movq 352(%rsi), %rdx
movq 352(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 344(%rdi)
sbbq %rdx, %rcx
movq 360(%rsi), %rax
movq 360(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 352(%rdi)
sbbq %rax, %rdx
movq 368(%rsi), %rcx
movq 368(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 360(%rdi)
sbbq %rcx, %rax
movq 376(%rsi), %rdx
movq 376(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 368(%rdi)
sbbq %rdx, %rcx
movq 384(%rsi), %rax
movq 384(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 376(%rdi)
sbbq %rax, %rdx
movq 392(%rsi), %rcx
movq 392(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 384(%rdi)
sbbq %rcx, %rax
movq 400(%rsi), %rdx
movq 400(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 392(%rdi)
sbbq %rdx, %rcx
movq 408(%rsi), %rax
movq 408(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 400(%rdi)
sbbq %rax, %rdx
movq 416(%rsi), %rcx
movq 416(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 408(%rdi)
sbbq %rcx, %rax
movq 424(%rsi), %rdx
movq 424(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 416(%rdi)
sbbq %rdx, %rcx
movq 432(%rsi), %rax
movq 432(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 424(%rdi)
sbbq %rax, %rdx
movq 440(%rsi), %rcx
movq 440(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 432(%rdi)
sbbq %rcx, %rax
movq 448(%rsi), %rdx
movq 448(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 440(%rdi)
sbbq %rdx, %rcx
movq 456(%rsi), %rax
movq 456(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 448(%rdi)
sbbq %rax, %rdx
movq 464(%rsi), %rcx
movq 464(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 456(%rdi)
sbbq %rcx, %rax
movq 472(%rsi), %rdx
movq 472(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 464(%rdi)
sbbq %rdx, %rcx
movq 480(%rsi), %rax
movq 480(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 472(%rdi)
sbbq %rax, %rdx
movq 488(%rsi), %rcx
movq 488(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 480(%rdi)
sbbq %rcx, %rax
movq 496(%rsi), %rdx
movq 496(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 488(%rdi)
sbbq %rdx, %rcx
movq 504(%rsi), %rax
movq 504(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 496(%rdi)
sbbq %rax, %rdx
movq %rdx, 504(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_4096_mont_reduce_avx2_64,.-sp_4096_mont_reduce_avx2_64
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
#ifndef __APPLE__
.text
.globl sp_4096_get_from_table_avx2_64
.type sp_4096_get_from_table_avx2_64,@function
.align 16
sp_4096_get_from_table_avx2_64:
#else
.section __TEXT,__text
.globl _sp_4096_get_from_table_avx2_64
.p2align 4
_sp_4096_get_from_table_avx2_64:
#endif /* __APPLE__ */
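        # Constant-time lookup, AVX2 form: the target index in %rdx and the
        # constant 1 are broadcast across %ymm10/%ymm11 via vpermd with a
        # zero index vector. Each block below reads all 16 table pointers,
        # masks every entry against the running counter in %ymm13 and
        # OR-accumulates only the match, keeping the access pattern
        # independent of the secret index.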
movq $0x01, %rax
movd %rdx, %xmm10
movd %rax, %xmm11
vpxor %ymm13, %ymm13, %ymm13
vpermd %ymm10, %ymm13, %ymm10
vpermd %ymm11, %ymm13, %ymm11
# START: 0-15
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 0-15
# START: 16-31
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x80, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 16-31
# START: 32-47
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x100, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
addq $0x80, %rdi
# END: 32-47
# START: 48-63
vpxor %ymm13, %ymm13, %ymm13
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
vpxor %ymm6, %ymm6, %ymm6
vpxor %ymm7, %ymm7, %ymm7
# ENTRY: 0
movq (%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 1
movq 8(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 2
movq 16(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 3
movq 24(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 4
movq 32(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 5
movq 40(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 6
movq 48(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 7
movq 56(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 8
movq 64(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 9
movq 72(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 10
movq 80(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 11
movq 88(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 12
movq 96(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 13
movq 104(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 14
movq 112(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
# ENTRY: 15
movq 120(%rsi), %rcx
addq $0x180, %rcx
vpcmpeqd %ymm10, %ymm13, %ymm12
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpor %ymm0, %ymm4, %ymm4
vpor %ymm1, %ymm5, %ymm5
vpor %ymm2, %ymm6, %ymm6
vpor %ymm3, %ymm7, %ymm7
vpaddd %ymm11, %ymm13, %ymm13
vmovdqu %ymm4, (%rdi)
vmovdqu %ymm5, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm7, 96(%rdi)
# END: 48-63
repz retq
#ifndef __APPLE__
.size sp_4096_get_from_table_avx2_64,.-sp_4096_get_from_table_avx2_64
#endif /* __APPLE__ */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Conditionally add a and b using the mask m.
 * m is all ones (-1) when the addition should be performed and 0 when it
 * should not.
 *
 * r A single precision number representing the conditional add result.
 * a A single precision number to add with.
 * b A single precision number to add.
 * m Mask value to apply.
 */
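/* Word-level sketch (illustrative C only, not part of the build):
 *
 *     carry = 0;
 *     for (i = 0; i < 32; i++) {
 *         r[i] = a[i] + (b[i] & m) + carry;  // m is all ones or zero
 *         carry = <carry out of the addition>;
 *     }
 *     return carry;
 *
 * The generic version below first stages b & m in a 256-byte stack buffer,
 * then runs one adcq carry chain over it.
 */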
#ifndef __APPLE__
.text
.globl sp_4096_cond_add_32
.type sp_4096_cond_add_32,@function
.align 16
sp_4096_cond_add_32:
#else
.section __TEXT,__text
.globl _sp_4096_cond_add_32
.p2align 4
_sp_4096_cond_add_32:
#endif /* __APPLE__ */
subq $0x100, %rsp
movq $0x00, %rax
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq 128(%rdx), %r8
movq 136(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 128(%rsp)
movq %r9, 136(%rsp)
movq 144(%rdx), %r8
movq 152(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 144(%rsp)
movq %r9, 152(%rsp)
movq 160(%rdx), %r8
movq 168(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 160(%rsp)
movq %r9, 168(%rsp)
movq 176(%rdx), %r8
movq 184(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 176(%rsp)
movq %r9, 184(%rsp)
movq 192(%rdx), %r8
movq 200(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 192(%rsp)
movq %r9, 200(%rsp)
movq 208(%rdx), %r8
movq 216(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 208(%rsp)
movq %r9, 216(%rsp)
movq 224(%rdx), %r8
movq 232(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 224(%rsp)
movq %r9, 232(%rsp)
movq 240(%rdx), %r8
movq 248(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 240(%rsp)
movq %r9, 248(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
addq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
adcq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 112(%rdi)
movq 128(%rsi), %r8
movq 128(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 120(%rdi)
movq 136(%rsi), %r9
movq 136(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 128(%rdi)
movq 144(%rsi), %r8
movq 144(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 136(%rdi)
movq 152(%rsi), %r9
movq 152(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 144(%rdi)
movq 160(%rsi), %r8
movq 160(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 152(%rdi)
movq 168(%rsi), %r9
movq 168(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 160(%rdi)
movq 176(%rsi), %r8
movq 176(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 168(%rdi)
movq 184(%rsi), %r9
movq 184(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 176(%rdi)
movq 192(%rsi), %r8
movq 192(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 184(%rdi)
movq 200(%rsi), %r9
movq 200(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 192(%rdi)
movq 208(%rsi), %r8
movq 208(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 200(%rdi)
movq 216(%rsi), %r9
movq 216(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 208(%rdi)
movq 224(%rsi), %r8
movq 224(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 216(%rdi)
movq 232(%rsi), %r9
movq 232(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 224(%rdi)
movq 240(%rsi), %r8
movq 240(%rsp), %rdx
adcq %rdx, %r8
movq %r9, 232(%rdi)
movq 248(%rsi), %r9
movq 248(%rsp), %rdx
adcq %rdx, %r9
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
adcq $0x00, %rax
addq $0x100, %rsp
repz retq
#ifndef __APPLE__
.size sp_4096_cond_add_32,.-sp_4096_cond_add_32
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Conditionally add a and b using the mask m.
* m is -1 to add and 0 when not.
*
* r A single precision number representing conditional add result.
* a A single precision number to add with.
* b A single precision number to add.
* m Mask value to apply.
*/
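/* Illustrative C sketch (comment only, not assembled), assuming 64-bit
 * limbs and m being 0 or all-ones: pextq with an all-ones/zero mask, as
 * used below, acts as b[i] & m. The helper name and the use of
 * unsigned __int128 are ours, not wolfSSL API.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static sp_digit cond_add_32_sketch(sp_digit* r, const sp_digit* a,
 *                                      const sp_digit* b, sp_digit m)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 32; i++) {
 *           t += (unsigned __int128)a[i] + (b[i] & m);  // conditional add
 *           r[i] = (sp_digit)t;
 *           t >>= 64;                                   // carry
 *       }
 *       return (sp_digit)t;  // carry out, returned in %rax below
 *   }
 */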
#ifndef __APPLE__
.text
.globl sp_4096_cond_add_avx2_32
.type sp_4096_cond_add_avx2_32,@function
.align 16
sp_4096_cond_add_avx2_32:
#else
.section __TEXT,__text
.globl _sp_4096_cond_add_avx2_32
.p2align 4
_sp_4096_cond_add_avx2_32:
#endif /* __APPLE__ */
movq $0x00, %rax
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
addq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
adcq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
adcq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
adcq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
adcq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
adcq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
adcq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
adcq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
adcq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
adcq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
adcq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
adcq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
adcq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
adcq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
adcq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
adcq %r9, %r8
movq 128(%rdx), %r10
movq 128(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 120(%rdi)
adcq %r10, %r9
movq 136(%rdx), %r8
movq 136(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 128(%rdi)
adcq %r8, %r10
movq 144(%rdx), %r9
movq 144(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 136(%rdi)
adcq %r9, %r8
movq 152(%rdx), %r10
movq 152(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 144(%rdi)
adcq %r10, %r9
movq 160(%rdx), %r8
movq 160(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 152(%rdi)
adcq %r8, %r10
movq 168(%rdx), %r9
movq 168(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 160(%rdi)
adcq %r9, %r8
movq 176(%rdx), %r10
movq 176(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 168(%rdi)
adcq %r10, %r9
movq 184(%rdx), %r8
movq 184(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 176(%rdi)
adcq %r8, %r10
movq 192(%rdx), %r9
movq 192(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 184(%rdi)
adcq %r9, %r8
movq 200(%rdx), %r10
movq 200(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 192(%rdi)
adcq %r10, %r9
movq 208(%rdx), %r8
movq 208(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 200(%rdi)
adcq %r8, %r10
movq 216(%rdx), %r9
movq 216(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 208(%rdi)
adcq %r9, %r8
movq 224(%rdx), %r10
movq 224(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 216(%rdi)
adcq %r10, %r9
movq 232(%rdx), %r8
movq 232(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 224(%rdi)
adcq %r8, %r10
movq 240(%rdx), %r9
movq 240(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 232(%rdi)
adcq %r9, %r8
movq 248(%rdx), %r10
movq 248(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 240(%rdi)
adcq %r10, %r9
movq %r9, 248(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_4096_cond_add_avx2_32,.-sp_4096_cond_add_avx2_32
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Shift number left by n bits. (r = a << n)
 *
 * r Result of left shift by n.
 * a Number to shift.
 * n Amount to shift.
*/
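/* Illustrative C sketch (comment only, not assembled) of the shift below,
 * assuming 0 < n < 64. The result is one word longer than the input: the
 * asm stores the overflow word at offset 512, i.e. r[64].
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void lshift_64_sketch(sp_digit* r, const sp_digit* a, int n)
 *   {
 *       int i;
 *       r[64] = a[63] >> (64 - n);            // bits shifted out the top
 *       for (i = 63; i > 0; i--)              // shldq: fill in from below
 *           r[i] = (a[i] << n) | (a[i - 1] >> (64 - n));
 *       r[0] = a[0] << n;                     // plain shlq for the bottom
 *   }
 */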
#ifndef __APPLE__
.text
.globl sp_4096_lshift_64
.type sp_4096_lshift_64,@function
.align 16
sp_4096_lshift_64:
#else
.section __TEXT,__text
.globl _sp_4096_lshift_64
.p2align 4
_sp_4096_lshift_64:
#endif /* __APPLE__ */
movb %dl, %cl
movq $0x00, %r10
movq 472(%rsi), %r11
movq 480(%rsi), %rdx
movq 488(%rsi), %rax
movq 496(%rsi), %r8
movq 504(%rsi), %r9
shldq %cl, %r9, %r10
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 480(%rdi)
movq %rax, 488(%rdi)
movq %r8, 496(%rdi)
movq %r9, 504(%rdi)
movq %r10, 512(%rdi)
movq 440(%rsi), %r9
movq 448(%rsi), %rdx
movq 456(%rsi), %rax
movq 464(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 448(%rdi)
movq %rax, 456(%rdi)
movq %r8, 464(%rdi)
movq %r11, 472(%rdi)
movq 408(%rsi), %r11
movq 416(%rsi), %rdx
movq 424(%rsi), %rax
movq 432(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 416(%rdi)
movq %rax, 424(%rdi)
movq %r8, 432(%rdi)
movq %r9, 440(%rdi)
movq 376(%rsi), %r9
movq 384(%rsi), %rdx
movq 392(%rsi), %rax
movq 400(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 384(%rdi)
movq %rax, 392(%rdi)
movq %r8, 400(%rdi)
movq %r11, 408(%rdi)
movq 344(%rsi), %r11
movq 352(%rsi), %rdx
movq 360(%rsi), %rax
movq 368(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 352(%rdi)
movq %rax, 360(%rdi)
movq %r8, 368(%rdi)
movq %r9, 376(%rdi)
movq 312(%rsi), %r9
movq 320(%rsi), %rdx
movq 328(%rsi), %rax
movq 336(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 320(%rdi)
movq %rax, 328(%rdi)
movq %r8, 336(%rdi)
movq %r11, 344(%rdi)
movq 280(%rsi), %r11
movq 288(%rsi), %rdx
movq 296(%rsi), %rax
movq 304(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 288(%rdi)
movq %rax, 296(%rdi)
movq %r8, 304(%rdi)
movq %r9, 312(%rdi)
movq 248(%rsi), %r9
movq 256(%rsi), %rdx
movq 264(%rsi), %rax
movq 272(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 256(%rdi)
movq %rax, 264(%rdi)
movq %r8, 272(%rdi)
movq %r11, 280(%rdi)
movq 216(%rsi), %r11
movq 224(%rsi), %rdx
movq 232(%rsi), %rax
movq 240(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 224(%rdi)
movq %rax, 232(%rdi)
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
movq 184(%rsi), %r9
movq 192(%rsi), %rdx
movq 200(%rsi), %rax
movq 208(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 192(%rdi)
movq %rax, 200(%rdi)
movq %r8, 208(%rdi)
movq %r11, 216(%rdi)
movq 152(%rsi), %r11
movq 160(%rsi), %rdx
movq 168(%rsi), %rax
movq 176(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 160(%rdi)
movq %rax, 168(%rdi)
movq %r8, 176(%rdi)
movq %r9, 184(%rdi)
movq 120(%rsi), %r9
movq 128(%rsi), %rdx
movq 136(%rsi), %rax
movq 144(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 128(%rdi)
movq %rax, 136(%rdi)
movq %r8, 144(%rdi)
movq %r11, 152(%rdi)
movq 88(%rsi), %r11
movq 96(%rsi), %rdx
movq 104(%rsi), %rax
movq 112(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 96(%rdi)
movq %rax, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
movq 56(%rsi), %r9
movq 64(%rsi), %rdx
movq 72(%rsi), %rax
movq 80(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 64(%rdi)
movq %rax, 72(%rdi)
movq %r8, 80(%rdi)
movq %r11, 88(%rdi)
movq 24(%rsi), %r11
movq 32(%rsi), %rdx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 32(%rdi)
movq %rax, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shlq %cl, %rdx
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %r8, 16(%rdi)
movq %r11, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_4096_lshift_64,.-sp_4096_lshift_64
#endif /* __APPLE__ */
#endif /* WOLFSSL_SP_4096 */
#endif /* WOLFSSL_SP_4096 */
#ifndef WOLFSSL_SP_NO_256
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
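/* Illustrative C sketch (comment only, not assembled): schoolbook 4x4
 * multiply with 64-bit limbs. The asm stages the low four result words on
 * the stack so that r may alias a or b; the sketch uses a local array for
 * the same reason. unsigned __int128 stands in for the mulq hi:lo pair.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void mul_4_sketch(sp_digit* r, const sp_digit* a,
 *                            const sp_digit* b)
 *   {
 *       sp_digit t[8] = {0};
 *       int i, j;
 *       for (i = 0; i < 4; i++) {
 *           sp_digit c = 0;
 *           for (j = 0; j < 4; j++) {
 *               unsigned __int128 p = (unsigned __int128)a[i] * b[j]
 *                                   + t[i + j] + c;
 *               t[i + j] = (sp_digit)p;
 *               c = (sp_digit)(p >> 64);
 *           }
 *           t[i + 4] = c;
 *       }
 *       for (i = 0; i < 8; i++)
 *           r[i] = t[i];
 *   }
 */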
#ifndef __APPLE__
.text
.globl sp_256_mul_4
.type sp_256_mul_4,@function
.align 16
sp_256_mul_4:
#else
.section __TEXT,__text
.globl _sp_256_mul_4
.p2align 4
_sp_256_mul_4:
#endif /* __APPLE__ */
movq %rdx, %rcx
subq $32, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rdi)
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rdi)
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
addq $32, %rsp
repz retq
#ifndef __APPLE__
.size sp_256_mul_4,.-sp_256_mul_4
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
#ifndef __APPLE__
.text
.globl sp_256_mul_avx2_4
.type sp_256_mul_avx2_4,@function
.align 16
sp_256_mul_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mul_avx2_4
.p2align 4
_sp_256_mul_avx2_4:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rbp
movq (%rsi), %rdx
movq 8(%rbp), %r14
# A[0] * B[0]
mulxq (%rbp), %r8, %r9
xorq %rbx, %rbx
# A[0] * B[1]
mulxq %r14, %rax, %r10
adcxq %rax, %r9
# A[0] * B[2]
mulxq 16(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[3]
mulxq 24(%rbp), %rax, %r12
adcxq %rax, %r11
movq 8(%rsi), %rdx
adcxq %rbx, %r12
# A[1] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r9
# A[1] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r10
adcxq %rax, %r10
# A[1] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r11
adcxq %rax, %r11
# A[1] * B[3]
mulxq 24(%rbp), %rax, %r13
adoxq %rcx, %r12
adcxq %rax, %r12
adoxq %rbx, %r13
movq 16(%rsi), %rdx
adcxq %rbx, %r13
# A[2] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r10
# A[2] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r11
adcxq %rax, %r11
# A[2] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r12
adcxq %rax, %r12
# A[2] * B[3]
mulxq 24(%rbp), %rax, %r14
adoxq %rcx, %r13
adcxq %rax, %r13
adoxq %rbx, %r14
movq 24(%rsi), %rdx
adcxq %rbx, %r14
# A[3] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r11
# A[3] * B[1]
mulxq 8(%rbp), %rax, %r15
adoxq %rcx, %r12
adcxq %rax, %r12
# A[3] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r13
adcxq %rax, %r13
# A[3] * B[3]
mulxq 24(%rbp), %rax, %r15
adoxq %rcx, %r14
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbx, %r15
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_256_mul_avx2_4,.-sp_256_mul_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
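/* Illustrative C sketch (comment only, not assembled): squaring computes
 * each cross product a[i]*a[j] (i < j) once and adds it twice, which is
 * why the add/adc sequences below are repeated; diagonal squares are
 * added once.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void sqr_4_sketch(sp_digit* r, const sp_digit* a)
 *   {
 *       sp_digit t[8] = {0};
 *       sp_digit c;
 *       unsigned __int128 p;
 *       int i, j;
 *       for (i = 0; i < 4; i++) {          // off-diagonal products, once
 *           c = 0;
 *           for (j = i + 1; j < 4; j++) {
 *               p = (unsigned __int128)a[i] * a[j] + t[i + j] + c;
 *               t[i + j] = (sp_digit)p;
 *               c = (sp_digit)(p >> 64);
 *           }
 *           t[i + 4] = c;
 *       }
 *       for (c = 0, i = 0; i < 8; i++) {   // double them
 *           sp_digit v = t[i];
 *           t[i] = (v << 1) | c;
 *           c = v >> 63;
 *       }
 *       for (c = 0, i = 0; i < 4; i++) {   // add the diagonal squares
 *           p = (unsigned __int128)a[i] * a[i] + t[2 * i] + c;
 *           t[2 * i] = (sp_digit)p;
 *           p = (p >> 64) + t[2 * i + 1];
 *           t[2 * i + 1] = (sp_digit)p;
 *           c = (sp_digit)(p >> 64);
 *       }
 *       for (i = 0; i < 8; i++)
 *           r[i] = t[i];
 *   }
 */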
#ifndef __APPLE__
.text
.globl sp_256_sqr_4
.type sp_256_sqr_4,@function
.align 16
sp_256_sqr_4:
#else
.section __TEXT,__text
.globl _sp_256_sqr_4
.p2align 4
_sp_256_sqr_4:
#endif /* __APPLE__ */
pushq %r12
subq $32, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rdi)
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 40(%rdi)
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
movq %rcx, 48(%rdi)
movq %r8, 56(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
addq $32, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_sqr_4,.-sp_256_sqr_4
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* r Result of squaring.
 * a Number to square.
*/
#ifndef __APPLE__
.text
.globl sp_256_sqr_avx2_4
.type sp_256_sqr_avx2_4,@function
.align 16
sp_256_sqr_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_sqr_avx2_4
.p2align 4
_sp_256_sqr_avx2_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
xorq %r8, %r8
movq (%rsi), %rdx
movq 8(%rsi), %rcx
movq 16(%rsi), %rbx
movq 24(%rsi), %r15
# A[0] * A[1]
mulxq %rcx, %r9, %r10
# A[0] * A[2]
mulxq %rbx, %r8, %r11
adoxq %r8, %r10
# A[0] * A[3]
mulxq %r15, %r8, %r12
movq %rcx, %rdx
adoxq %r8, %r11
# A[1] * A[2]
mulxq %rbx, %r8, %rax
movq %r15, %rdx
adcxq %r8, %r11
# A[1] * A[3]
mulxq %rcx, %r8, %r13
movq $0x00, %r15
adoxq %rax, %r12
adcxq %r8, %r12
# A[2] * A[3]
mulxq %rbx, %r8, %r14
adoxq %r15, %r13
adcxq %r8, %r13
adoxq %r15, %r14
adcxq %r15, %r14
# Double with Carry Flag
xorq %r15, %r15
# A[0] * A[0]
movq (%rsi), %rdx
mulxq %rdx, %r8, %rax
adcxq %r9, %r9
adcxq %r10, %r10
adoxq %rax, %r9
# A[1] * A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r10
# A[2] * A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adcxq %r12, %r12
adoxq %rbx, %r11
adcxq %r13, %r13
adoxq %rax, %r12
adcxq %r14, %r14
# A[3] * A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rbx
adoxq %rcx, %r13
adcxq %r15, %r15
adoxq %rax, %r14
adoxq %rbx, %r15
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_sqr_avx2_4,.-sp_256_sqr_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
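/* Illustrative C sketch (comment only, not assembled): a plain 4-word add
 * with carry out, as implemented below with one addq and three adcqs.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static sp_digit add_4_sketch(sp_digit* r, const sp_digit* a,
 *                                const sp_digit* b)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 4; i++) {
 *           t += (unsigned __int128)a[i] + b[i];
 *           r[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       return (sp_digit)t;  // carry out, returned in %rax
 *   }
 */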
#ifndef __APPLE__
.text
.globl sp_256_add_4
.type sp_256_add_4,@function
.align 16
sp_256_add_4:
#else
.section __TEXT,__text
.globl _sp_256_add_4
.p2align 4
_sp_256_add_4:
#endif /* __APPLE__ */
xorq %rax, %rax
movq (%rsi), %rcx
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
addq (%rdx), %rcx
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_256_add_4,.-sp_256_add_4
#endif /* __APPLE__ */
/* Sub b from a into r. (r = a - b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_256_sub_4
.type sp_256_sub_4,@function
.align 16
sp_256_sub_4:
#else
.section __TEXT,__text
.globl _sp_256_sub_4
.p2align 4
_sp_256_sub_4:
#endif /* __APPLE__ */
xorq %rax, %rax
movq (%rsi), %rcx
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
subq (%rdx), %rcx
sbbq 8(%rdx), %r8
sbbq 16(%rdx), %r9
sbbq 24(%rdx), %r10
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_256_sub_4,.-sp_256_sub_4
#endif /* __APPLE__ */
/* Conditionally copy a into r using the mask m.
* m is -1 to copy and 0 when not.
*
* r A single precision number to copy over.
* a A single precision number to copy.
* m Mask value to apply.
*/
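/* Illustrative C sketch (comment only, not assembled): the branch-free
 * XOR/AND/XOR select used below. With m all-ones the XOR difference is
 * kept and r becomes a; with m zero the difference is masked away and r
 * is unchanged.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void cond_copy_4_sketch(sp_digit* r, const sp_digit* a,
 *                                  sp_digit m)
 *   {
 *       int i;
 *       for (i = 0; i < 4; i++)
 *           r[i] ^= (r[i] ^ a[i]) & m;  // r = m ? a : r, no branches
 *   }
 */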
#ifndef __APPLE__
.text
.globl sp_256_cond_copy_4
.type sp_256_cond_copy_4,@function
.align 16
sp_256_cond_copy_4:
#else
.section __TEXT,__text
.globl _sp_256_cond_copy_4
.p2align 4
_sp_256_cond_copy_4:
#endif /* __APPLE__ */
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
xorq (%rsi), %rax
xorq 8(%rsi), %rcx
xorq 16(%rsi), %r8
xorq 24(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, (%rdi)
xorq %rcx, 8(%rdi)
xorq %r8, 16(%rdi)
xorq %r9, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_cond_copy_4,.-sp_256_cond_copy_4
#endif /* __APPLE__ */
/* Multiply two Montgomery form numbers mod the modulus (prime).
* (r = a * b mod m)
*
* r Result of multiplication.
* a First number to multiply in Montgomery form.
* b Second number to multiply in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
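/* In effect this routine fuses a 4x4 multiply with the P-256 special
 * reduction that follows it. A sketch of the equivalent two-step form,
 * using routines defined elsewhere in this file (t is a hypothetical
 * 8-word scratch buffer; its low four words then hold the result):
 *
 *   sp_256_mul_4(t, a, b);            // t = a * b, 512 bits
 *   sp_256_mont_reduce_4(t, m, mp);   // t *= R^-1 mod m, with R = 2^256
 *
 * Fusing the two keeps the full product in registers rather than memory.
 */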
#ifndef __APPLE__
.text
.globl sp_256_mont_mul_4
.type sp_256_mont_mul_4,@function
.align 16
sp_256_mont_mul_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_mul_4
.p2align 4
_sp_256_mont_mul_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %r8
# A[0] * B[0]
movq (%r8), %rax
mulq (%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * B[1]
movq 8(%r8), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[1] * B[0]
movq (%r8), %rax
mulq 8(%rsi)
xorq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[0] * B[2]
movq 16(%r8), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
# A[1] * B[1]
movq 8(%r8), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[0]
movq (%r8), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[0] * B[3]
movq 24(%r8), %rax
mulq (%rsi)
xorq %r14, %r14
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[2]
movq 16(%r8), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[2] * B[1]
movq 8(%r8), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[3] * B[0]
movq (%r8), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r14
# A[1] * B[3]
movq 24(%r8), %rax
mulq 8(%rsi)
xorq %r15, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[2]
movq 16(%r8), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[3] * B[1]
movq 8(%r8), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x00, %r15
# A[2] * B[3]
movq 24(%r8), %rax
mulq 16(%rsi)
xorq %rbx, %rbx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[2]
movq 16(%r8), %rax
mulq 24(%rsi)
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
# A[3] * B[3]
movq 24(%r8), %rax
mulq 24(%rsi)
addq %rax, %r15
adcq %rdx, %rbx
# Start Reduction
# mu = a[0]-a[3] + a[0]-a[2] << 32 << 64 + (a[0] * 2) << 192
# - a[0] << 32 << 192
# a[0]-a[3] + (a[0] * 2) << 192
movq %r9, %rax
leaq (%r12,%r9,2), %rdx
movq %r10, %rsi
movq %r11, %r8
movq %r11, %rcx
# a[0]-a[2] << 32
shlq $32, %r9
shldq $32, %rsi, %rcx
shldq $32, %rax, %r10
# - a[0] << 32 << 192
subq %r9, %rdx
# + a[0]-a[2] << 32 << 64
addq %r9, %rsi
adcq %r10, %r8
adcq %rcx, %rdx
# a += (mu << 256) - (mu << 224) + (mu << 192) + (mu << 96) - mu
xorq %rcx, %rcx
# a += mu << 256
addq %rax, %r13
adcq %rsi, %r14
adcq %r8, %r15
adcq %rdx, %rbx
sbbq %r9, %r9
# a += mu << 192
addq %rax, %r12
adcq %rsi, %r13
movq %rsi, %r10
adcq %r8, %r14
adcq %rdx, %r15
adcq $0x00, %rbx
sbbq $0x00, %r9
# mu <<= 32
shldq $32, %rdx, %rcx
shldq $32, %r8, %rdx
shldq $32, %rsi, %r8
shldq $32, %rax, %rsi
shlq $32, %rax
# a -= (mu << 32) << 192
subq %rax, %r12
sbbq %rsi, %r13
sbbq %r8, %r14
sbbq %rdx, %r15
sbbq %rcx, %rbx
adcq $0x00, %r9
# a += (mu << 32) << 64
subq %rax, %r10
adcq %rsi, %r11
adcq %r8, %r12
adcq %rdx, %r13
adcq %rcx, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
sbbq $0x00, %r9
movq $0xffffffff00000001, %rsi
# mask m and sub from result if overflow
# m[0] = -1 & mask = mask
# m[2] = 0 & mask = 0
movl %r9d, %eax
andq %r9, %rsi
subq %r9, %r13
sbbq %rax, %r14
movq %r13, (%rdi)
sbbq $0x00, %r15
movq %r14, 8(%rdi)
sbbq %rsi, %rbx
movq %r15, 16(%rdi)
movq %rbx, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_mul_4,.-sp_256_mont_mul_4
#endif /* __APPLE__ */
/* Square the Montgomery form number mod the modulus (prime). (r = a * a mod m)
*
* r Result of squaring.
* a Number to square in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_sqr_4
.type sp_256_mont_sqr_4,@function
.align 16
sp_256_mont_sqr_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_sqr_4
.p2align 4
_sp_256_mont_sqr_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
# A[0] * A[1]
movq (%rsi), %rax
mulq 8(%rsi)
movq %rax, %r9
movq %rdx, %r10
# A[0] * A[2]
movq (%rsi), %rax
mulq 16(%rsi)
xorq %r11, %r11
addq %rax, %r10
adcq %rdx, %r11
# A[0] * A[3]
movq (%rsi), %rax
mulq 24(%rsi)
xorq %r12, %r12
addq %rax, %r11
adcq %rdx, %r12
# A[1] * A[2]
movq 8(%rsi), %rax
mulq 16(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * A[3]
movq 8(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
# A[2] * A[3]
movq 16(%rsi), %rax
mulq 24(%rsi)
xorq %r14, %r14
addq %rax, %r13
adcq %rdx, %r14
# Double
xorq %r15, %r15
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq $0x00, %r15
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
movq %rax, %r8
movq %rdx, %rbx
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rbx, %r9
adcq %rax, %r10
adcq $0x00, %rdx
movq %rdx, %rbx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rbx, %r11
adcq %rax, %r12
adcq $0x00, %rdx
movq %rdx, %rbx
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rbx, %r13
adcq %rax, %r14
adcq %rdx, %r15
# Start Reduction
# mu = a[0]-a[3] + a[0]-a[2] << 32 << 64 + (a[0] * 2) << 192
# - a[0] << 32 << 192
# a[0]-a[3] + (a[0] * 2) << 192
movq %r8, %rax
leaq (%r11,%r8,2), %rdx
movq %r9, %rsi
movq %r10, %rbx
movq %r10, %rcx
# a[0]-a[2] << 32
shlq $32, %r8
shldq $32, %rsi, %rcx
shldq $32, %rax, %r9
# - a[0] << 32 << 192
subq %r8, %rdx
# + a[0]-a[2] << 32 << 64
addq %r8, %rsi
adcq %r9, %rbx
adcq %rcx, %rdx
# a += (mu << 256) - (mu << 224) + (mu << 192) + (mu << 96) - mu
xorq %rcx, %rcx
# a += mu << 256
addq %rax, %r12
adcq %rsi, %r13
adcq %rbx, %r14
adcq %rdx, %r15
sbbq %r8, %r8
# a += mu << 192
addq %rax, %r11
adcq %rsi, %r12
movq %rsi, %r9
adcq %rbx, %r13
adcq %rdx, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
# mu <<= 32
shldq $32, %rdx, %rcx
shldq $32, %rbx, %rdx
shldq $32, %rsi, %rbx
shldq $32, %rax, %rsi
shlq $32, %rax
# a -= (mu << 32) << 192
subq %rax, %r11
sbbq %rsi, %r12
sbbq %rbx, %r13
sbbq %rdx, %r14
sbbq %rcx, %r15
adcq $0x00, %r8
# a += (mu << 32) << 64
subq %rax, %r9
adcq %rsi, %r10
adcq %rbx, %r11
adcq %rdx, %r12
adcq %rcx, %r13
adcq $0x00, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
movq $0xffffffff00000001, %rsi
# mask m and sub from result if overflow
# m[0] = -1 & mask = mask
# m[2] = 0 & mask = 0
movl %r8d, %eax
andq %r8, %rsi
subq %r8, %r12
sbbq %rax, %r13
movq %r12, (%rdi)
sbbq $0x00, %r14
movq %r13, 8(%rdi)
sbbq %rsi, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_sqr_4,.-sp_256_mont_sqr_4
#endif /* __APPLE__ */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
* return -ve, 0 or +ve if a is less than, equal to or greater than b
* respectively.
*/
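/* Illustrative C sketch (comment only, not assembled) of the constant-time
 * compare below. The asm uses cmov, so there are no branches; the ifs here
 * are for readability only. mask stays all-ones while the words compared
 * so far are equal, so the first differing word (from the top) decides and
 * later words are masked to zero. The final XOR turns "all equal" into 0.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static long long cmp_4_sketch(const sp_digit* a, const sp_digit* b)
 *   {
 *       long long r = -1;
 *       sp_digit mask = (sp_digit)-1;
 *       int i;
 *       for (i = 3; i >= 0; i--) {
 *           sp_digit x = a[i] & mask;
 *           sp_digit y = b[i] & mask;
 *           if (x > y) r = 1;                // cmova
 *           if (x < y) r = (long long)mask;  // cmovc (mask still -1 here)
 *           if (x != y) mask = 0;            // cmovnz: freeze the result
 *       }
 *       return r ^ (long long)mask;          // equal: -1 ^ -1 == 0
 *   }
 */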
#ifndef __APPLE__
.text
.globl sp_256_cmp_4
.type sp_256_cmp_4,@function
.align 16
sp_256_cmp_4:
#else
.section __TEXT,__text
.globl _sp_256_cmp_4
.p2align 4
_sp_256_cmp_4:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_256_cmp_4,.-sp_256_cmp_4
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_256_cond_sub_4
.type sp_256_cond_sub_4,@function
.align 16
sp_256_cond_sub_4:
#else
.section __TEXT,__text
.globl _sp_256_cond_sub_4
.p2align 4
_sp_256_cond_sub_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rdx), %r12
movq 8(%rdx), %r13
movq 16(%rdx), %r14
movq 24(%rdx), %r15
andq %rcx, %r12
andq %rcx, %r13
andq %rcx, %r14
andq %rcx, %r15
movq (%rsi), %r8
movq 8(%rsi), %r9
movq 16(%rsi), %r10
movq 24(%rsi), %r11
subq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
sbbq %r15, %r11
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
sbbq %rax, %rax
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_cond_sub_4,.-sp_256_cond_sub_4
#endif /* __APPLE__ */
/* Reduce the number back to 256 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_reduce_4
.type sp_256_mont_reduce_4,@function
.align 16
sp_256_mont_reduce_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_reduce_4
.p2align 4
_sp_256_mont_reduce_4:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
# Start Reduction
# mu = a[0]-a[3] + a[0]-a[2] << 32 << 64 + (a[0] * 2) << 192
# - a[0] << 32 << 192
# a[0]-a[3] + (a[0] * 2) << 192
movq %r8, %rax
leaq (%r11,%r8,2), %rdx
movq %r9, %rbx
movq %r10, %rcx
movq %r10, %rsi
# a[0]-a[2] << 32
shlq $32, %r8
shldq $32, %rbx, %rsi
shldq $32, %rax, %r9
# - a[0] << 32 << 192
subq %r8, %rdx
# + a[0]-a[2] << 32 << 64
addq %r8, %rbx
adcq %r9, %rcx
adcq %rsi, %rdx
# a += (mu << 256) - (mu << 224) + (mu << 192) + (mu << 96) - mu
xorq %rsi, %rsi
# a += mu << 256
addq %rax, %r12
adcq %rbx, %r13
adcq %rcx, %r14
adcq %rdx, %r15
sbbq %r8, %r8
# a += mu << 192
addq %rax, %r11
adcq %rbx, %r12
movq %rbx, %r9
adcq %rcx, %r13
adcq %rdx, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
# mu <<= 32
shldq $32, %rdx, %rsi
shldq $32, %rcx, %rdx
shldq $32, %rbx, %rcx
shldq $32, %rax, %rbx
shlq $32, %rax
# a -= (mu << 32) << 192
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rcx, %r13
sbbq %rdx, %r14
sbbq %rsi, %r15
adcq $0x00, %r8
# a += (mu << 32) << 64
subq %rax, %r9
adcq %rbx, %r10
adcq %rcx, %r11
adcq %rdx, %r12
adcq %rsi, %r13
adcq $0x00, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
movq $0xffffffff00000001, %rbx
# mask m and sub from result if overflow
# m[0] = -1 & mask = mask
# m[2] = 0 & mask = 0
movl %r8d, %eax
andq %r8, %rbx
subq %r8, %r12
sbbq %rax, %r13
movq %r12, (%rdi)
sbbq $0x00, %r14
movq %r13, 8(%rdi)
sbbq %rbx, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_256_mont_reduce_4,.-sp_256_mont_reduce_4
#endif /* __APPLE__ */
/* Reduce the number back to 256 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
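/* Illustrative C sketch (comment only, not assembled) of the word-by-word
 * Montgomery reduction below, as used for the curve order; the routine
 * sp_256_mont_reduce_4 above exploits the shape of the P-256 prime
 * instead. Each pass cancels one low word of a by adding mu * m; the high
 * half, minus m when the final carry is set, becomes the result.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void mont_reduce_sketch(sp_digit* a,   // 8 words, in place
 *                                  const sp_digit* m, sp_digit mp)
 *   {
 *       sp_digit o = 0;                           // carry past a[7]
 *       int i, j;
 *       for (i = 0; i < 4; i++) {
 *           sp_digit mu = a[i] * mp;              // mod 2^64
 *           sp_digit c = 0;
 *           for (j = 0; j < 4; j++) {
 *               unsigned __int128 p = (unsigned __int128)mu * m[j]
 *                                   + a[i + j] + c;
 *               a[i + j] = (sp_digit)p;           // a[i] becomes 0
 *               c = (sp_digit)(p >> 64);
 *           }
 *           unsigned __int128 s = (unsigned __int128)a[i + 4] + c + o;
 *           a[i + 4] = (sp_digit)s;
 *           o = (sp_digit)(s >> 64);
 *       }
 *       sp_digit mask = (sp_digit)0 - o;          // subtract m on carry
 *       sp_digit brw = 0;
 *       for (j = 0; j < 4; j++) {
 *           unsigned __int128 d = (unsigned __int128)a[4 + j]
 *                               - (m[j] & mask) - brw;
 *           a[j] = (sp_digit)d;                   // result in a[0..3]
 *           brw = (sp_digit)(d >> 64) & 1;
 *       }
 *   }
 */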
#ifndef __APPLE__
.text
.globl sp_256_mont_reduce_order_4
.type sp_256_mont_reduce_order_4,@function
.align 16
sp_256_mont_reduce_order_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_reduce_order_4
.p2align 4
_sp_256_mont_reduce_order_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
# i = 0
xorq %r14, %r14
movq $4, %r8
movq %rdi, %r13
L_mont_loop_4:
# mu = a[i] * mp
movq (%r13), %r12
imulq %rcx, %r12
# a[i+0] += m[0] * mu
movq (%rsi), %rax
movq 8(%rsi), %r10
mulq %r12
movq (%r13), %r15
addq %rax, %r15
movq %rdx, %r9
movq %r15, (%r13)
adcq $0x00, %r9
# a[i+1] += m[1] * mu
movq %r10, %rax
mulq %r12
movq 16(%rsi), %r10
movq 8(%r13), %r15
addq %r9, %rax
movq %rdx, %r11
adcq $0x00, %r11
addq %rax, %r15
movq %r15, 8(%r13)
adcq $0x00, %r11
# a[i+2] += m[2] * mu
movq %r10, %rax
mulq %r12
movq 24(%rsi), %r10
movq 16(%r13), %r15
addq %r11, %rax
movq %rdx, %r9
adcq $0x00, %r9
addq %rax, %r15
movq %r15, 16(%r13)
adcq $0x00, %r9
# a[i+3] += m[3] * mu
movq %r10, %rax
mulq %r12
movq 24(%r13), %r15
addq %r9, %rax
adcq %r14, %rdx
movq $0x00, %r14
adcq $0x00, %r14
addq %rax, %r15
movq %r15, 24(%r13)
adcq %rdx, 32(%r13)
adcq $0x00, %r14
# i += 1
addq $8, %r13
decq %r8
jnz L_mont_loop_4
xorq %rax, %rax
movq 32(%rdi), %rdx
movq 40(%rdi), %r8
movq 48(%rdi), %r15
movq 56(%rdi), %r9
subq %r14, %rax
movq (%rsi), %r10
movq 8(%rsi), %r11
movq 16(%rsi), %r12
movq 24(%rsi), %r13
andq %rax, %r10
andq %rax, %r11
andq %rax, %r12
andq %rax, %r13
subq %r10, %rdx
sbbq %r11, %r8
sbbq %r12, %r15
sbbq %r13, %r9
movq %rdx, (%rdi)
movq %r8, 8(%rdi)
movq %r15, 16(%rdi)
movq %r9, 24(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_reduce_order_4,.-sp_256_mont_reduce_order_4
#endif /* __APPLE__ */
/* Add two Montgomery form numbers (r = a + b % m).
*
* r Result of addition.
* a First number to add in Montgomery form.
* b Second number to add in Montgomery form.
* m Modulus (prime).
*/
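/* Illustrative C sketch (comment only, not assembled): add with carry,
 * then subtract the modulus masked by the carry. The sparse P-256 words
 * let the asm build the masked modulus from the mask itself with a movl
 * and two andqs. The asm repeats the masked subtraction with an updated
 * mask to handle inputs that are not fully reduced; a single pass is
 * shown here. sp_256_mont_sub_4 further below mirrors this pattern with
 * a masked add-back after the subtract.
 *
 *   typedef unsigned long long sp_digit;
 *   static const sp_digit P256[4] = {
 *       0xffffffffffffffffULL, 0x00000000ffffffffULL,
 *       0x0000000000000000ULL, 0xffffffff00000001ULL
 *   };
 *
 *   static void mont_add_4_sketch(sp_digit* r, const sp_digit* a,
 *                                 const sp_digit* b)
 *   {
 *       unsigned __int128 t = 0;
 *       sp_digit mask, brw = 0;
 *       int i;
 *       for (i = 0; i < 4; i++) {
 *           t += (unsigned __int128)a[i] + b[i];
 *           r[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       mask = (sp_digit)0 - (sp_digit)t;      // all-ones on carry out
 *       for (i = 0; i < 4; i++) {
 *           unsigned __int128 d = (unsigned __int128)r[i]
 *                               - (P256[i] & mask) - brw;
 *           r[i] = (sp_digit)d;
 *           brw = (sp_digit)(d >> 64) & 1;     // borrow
 *       }
 *   }
 */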
#ifndef __APPLE__
.text
.globl sp_256_mont_add_4
.type sp_256_mont_add_4,@function
.align 16
sp_256_mont_add_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_add_4
.p2align 4
_sp_256_mont_add_4:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
addq (%rdx), %rax
adcq 8(%rdx), %rcx
movq $0xffffffff00000001, %r11
adcq 16(%rdx), %r8
adcq 24(%rdx), %r9
sbbq %rsi, %rsi
movl %esi, %r10d
andq %rsi, %r11
subq %rsi, %rax
sbbq %r10, %rcx
sbbq $0x00, %r8
sbbq %r11, %r9
adcq $0x00, %rsi
andq %rsi, %r10
andq %rsi, %r11
subq %rsi, %rax
sbbq %r10, %rcx
movq %rax, (%rdi)
sbbq $0x00, %r8
movq %rcx, 8(%rdi)
sbbq %r11, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_add_4,.-sp_256_mont_add_4
#endif /* __APPLE__ */
/* Double a Montgomery form number (r = a + a % m).
*
* r Result of doubling.
* a Number to double in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_dbl_4
.type sp_256_mont_dbl_4,@function
.align 16
sp_256_mont_dbl_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_dbl_4
.p2align 4
_sp_256_mont_dbl_4:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
addq %rdx, %rdx
adcq %rax, %rax
movq $0xffffffff00000001, %r10
adcq %rcx, %rcx
movq %r8, %r11
adcq %r8, %r8
sarq $63, %r11
movl %r11d, %r9d
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
sbbq $0x00, %rcx
sbbq %r10, %r8
adcq $0x00, %r11
andq %r11, %r9
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
movq %rdx, (%rdi)
sbbq $0x00, %rcx
movq %rax, 8(%rdi)
sbbq %r10, %r8
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_dbl_4,.-sp_256_mont_dbl_4
#endif /* __APPLE__ */
/* Triple a Montgomery form number (r = a + a + a % m).
*
 * r Result of tripling.
* a Number to triple in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_tpl_4
.type sp_256_mont_tpl_4,@function
.align 16
sp_256_mont_tpl_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_tpl_4
.p2align 4
_sp_256_mont_tpl_4:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
addq %rdx, %rdx
adcq %rax, %rax
movq $0xffffffff00000001, %r10
adcq %rcx, %rcx
adcq %r8, %r8
sbbq %r11, %r11
movl %r11d, %r9d
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
sbbq $0x00, %rcx
sbbq %r10, %r8
adcq $0x00, %r11
andq %r11, %r9
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
sbbq $0x00, %rcx
sbbq %r10, %r8
addq (%rsi), %rdx
adcq 8(%rsi), %rax
movq $0xffffffff00000001, %r10
adcq 16(%rsi), %rcx
adcq 24(%rsi), %r8
sbbq $0x00, %r11
movl %r11d, %r9d
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
sbbq $0x00, %rcx
sbbq %r10, %r8
adcq $0x00, %r11
andq %r11, %r9
andq %r11, %r10
subq %r11, %rdx
sbbq %r9, %rax
movq %rdx, (%rdi)
sbbq $0x00, %rcx
movq %rax, 8(%rdi)
sbbq %r10, %r8
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_tpl_4,.-sp_256_mont_tpl_4
#endif /* __APPLE__ */
/* Subtract two Montgomery form numbers (r = a - b % m).
*
 * r Result of subtraction.
* a Number to subtract from in Montgomery form.
* b Number to subtract with in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_sub_4
.type sp_256_mont_sub_4,@function
.align 16
sp_256_mont_sub_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_sub_4
.p2align 4
_sp_256_mont_sub_4:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
subq (%rdx), %rax
sbbq 8(%rdx), %rcx
movq $0xffffffff00000001, %r11
sbbq 16(%rdx), %r8
sbbq 24(%rdx), %r9
sbbq %rsi, %rsi
movl %esi, %r10d
andq %rsi, %r11
addq %rsi, %rax
adcq %r10, %rcx
adcq $0x00, %r8
adcq %r11, %r9
adcq $0x00, %rsi
andq %rsi, %r10
andq %rsi, %r11
addq %rsi, %rax
adcq %r10, %rcx
movq %rax, (%rdi)
adcq $0x00, %r8
movq %rcx, 8(%rdi)
adcq %r11, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_sub_4,.-sp_256_mont_sub_4
#endif /* __APPLE__ */
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
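/* Illustrative C sketch (comment only, not assembled): if a is odd, add
 * the (odd) modulus to make the value even, then shift the 257-bit result
 * right one bit. P256 holds the modulus words, least significant first.
 *
 *   typedef unsigned long long sp_digit;
 *   static const sp_digit P256[4] = {
 *       0xffffffffffffffffULL, 0x00000000ffffffffULL,
 *       0x0000000000000000ULL, 0xffffffff00000001ULL
 *   };
 *
 *   static void div2_4_sketch(sp_digit* r, const sp_digit* a)
 *   {
 *       sp_digit mask = (sp_digit)0 - (a[0] & 1);  // all-ones if a odd
 *       sp_digit s[5];
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 4; i++) {
 *           t += (unsigned __int128)a[i] + (P256[i] & mask);
 *           s[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       s[4] = (sp_digit)t;                        // bit 256
 *       for (i = 0; i < 4; i++)                    // shrdq chain
 *           r[i] = (s[i] >> 1) | (s[i + 1] << 63);
 *   }
 */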
#ifndef __APPLE__
.text
.globl sp_256_mont_div2_4
.type sp_256_mont_div2_4,@function
.align 16
sp_256_mont_div2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_div2_4
.p2align 4
_sp_256_mont_div2_4:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq $0xffffffff00000001, %r10
movq %rdx, %r11
andq $0x01, %r11
negq %r11
movl %r11d, %r9d
andq %r11, %r10
addq %r11, %rdx
adcq %r9, %rax
adcq $0x00, %rcx
adcq %r10, %r8
movq $0x00, %r11
adcq $0x00, %r11
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r11, %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_div2_4,.-sp_256_mont_div2_4
#endif /* __APPLE__ */
/* Subtract double the second Montgomery form number from the first
 * (r = a - 2*b mod m). The second number is also replaced with b - r mod m,
 * the reverse subtraction the "rsb" in the name refers to.
 *
 * r Result of subtraction.
 * a Number to subtract from in Montgomery form.
 * b Number to double and subtract with in Montgomery form; overwritten
 *   with b - r mod m.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_rsb_sub_dbl_4
.type sp_256_mont_rsb_sub_dbl_4,@function
.align 16
sp_256_mont_rsb_sub_dbl_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_rsb_sub_dbl_4
.p2align 4
_sp_256_mont_rsb_sub_dbl_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq (%rdx), %r10
movq 8(%rdx), %r11
movq 16(%rdx), %r12
movq 24(%rdx), %r13
addq %r10, %r10
adcq %r11, %r11
movq $0xffffffff00000001, %r15
adcq %r12, %r12
adcq %r13, %r13
sbbq %rsi, %rsi
movl %esi, %r14d
andq %rsi, %r15
subq %rsi, %r10
sbbq %r14, %r11
sbbq $0x00, %r12
sbbq %r15, %r13
adcq $0x00, %rsi
andq %rsi, %r14
andq %rsi, %r15
subq %rsi, %r10
sbbq %r14, %r11
sbbq $0x00, %r12
sbbq %r15, %r13
subq %r10, %rax
sbbq %r11, %rcx
movq $0xffffffff00000001, %r15
sbbq %r12, %r8
sbbq %r13, %r9
sbbq $0x00, %rsi
movl %esi, %r14d
andq %rsi, %r15
addq %rsi, %rax
adcq %r14, %rcx
adcq $0x00, %r8
adcq %r15, %r9
adcq $0x00, %rsi
andq %rsi, %r14
andq %rsi, %r15
addq %rsi, %rax
adcq %r14, %rcx
movq %rax, (%rdi)
adcq $0x00, %r8
movq %rcx, 8(%rdi)
adcq %r15, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq (%rdx), %r10
movq 8(%rdx), %r11
movq 16(%rdx), %r12
movq 24(%rdx), %r13
subq %rax, %r10
sbbq %rcx, %r11
movq $0xffffffff00000001, %r15
sbbq %r8, %r12
sbbq %r9, %r13
sbbq %rsi, %rsi
movl %esi, %r14d
andq %rsi, %r15
addq %rsi, %r10
adcq %r14, %r11
adcq $0x00, %r12
adcq %r15, %r13
adcq $0x00, %rsi
andq %rsi, %r14
andq %rsi, %r15
addq %rsi, %r10
adcq %r14, %r11
movq %r10, (%rdx)
adcq $0x00, %r12
movq %r11, 8(%rdx)
adcq %r15, %r13
movq %r12, 16(%rdx)
movq %r13, 24(%rdx)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_rsb_sub_dbl_4,.-sp_256_mont_rsb_sub_dbl_4
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch every possible point that could be copied, so the memory access
 * pattern does not depend on idx.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of point to retrieve.
*/
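/* Illustrative C sketch (comment only, not assembled): read every entry
 * and mask all but entry idx to zero so the memory access pattern does
 * not depend on idx. Entries are 0xc8 bytes apart with the 4-word x, y
 * and z at byte offsets 0, 64 and 128; entry 0 is skipped, as in the asm.
 *
 *   typedef unsigned long long sp_digit;
 *
 *   static void get_point_33_sketch(sp_digit* rx, sp_digit* ry,
 *                                   sp_digit* rz,
 *                                   const unsigned char* tbl,
 *                                   unsigned idx)
 *   {
 *       unsigned i, j;
 *       for (j = 0; j < 4; j++)
 *           rx[j] = ry[j] = rz[j] = 0;
 *       for (i = 1; i < 33; i++) {
 *           sp_digit mask = (sp_digit)0 - (sp_digit)(i == idx);
 *           const sp_digit* e = (const sp_digit*)(tbl + 0xc8 * i);
 *           for (j = 0; j < 4; j++) {
 *               rx[j] |= e[j]      & mask;    // x at byte offset 0
 *               ry[j] |= e[8 + j]  & mask;    // y at byte offset 64
 *               rz[j] |= e[16 + j] & mask;    // z at byte offset 128
 *           }
 *       }
 *   }
 */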
#ifndef __APPLE__
.text
.globl sp_256_get_point_33_4
.type sp_256_get_point_33_4,@function
.align 16
sp_256_get_point_33_4:
#else
.section __TEXT,__text
.globl _sp_256_get_point_33_4
.p2align 4
_sp_256_get_point_33_4:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm13
addq $0xc8, %rsi
movd %eax, %xmm15
movq $32, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
movdqa %xmm15, %xmm14
L_256_get_point_33_4_start_1:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
movdqu (%rsi), %xmm6
movdqu 16(%rsi), %xmm7
movdqu 64(%rsi), %xmm8
movdqu 80(%rsi), %xmm9
movdqu 128(%rsi), %xmm10
movdqu 144(%rsi), %xmm11
addq $0xc8, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
decq %rax
jnz L_256_get_point_33_4_start_1
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 64(%rdi)
movdqu %xmm3, 80(%rdi)
movdqu %xmm4, 128(%rdi)
movdqu %xmm5, 144(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_point_33_4,.-sp_256_get_point_33_4
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch every possible point that could be copied, so the memory access
 * pattern does not depend on idx.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of point to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_256_get_point_33_avx2_4
.type sp_256_get_point_33_avx2_4,@function
.align 16
sp_256_get_point_33_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_get_point_33_avx2_4
.p2align 4
_sp_256_get_point_33_avx2_4:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm7
addq $0xc8, %rsi
movd %eax, %xmm9
movq $32, %rax
vpxor %ymm8, %ymm8, %ymm8
vpermd %ymm7, %ymm8, %ymm7
vpermd %ymm9, %ymm8, %ymm9
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vpxor %ymm2, %ymm2, %ymm2
vmovdqa %ymm9, %ymm8
L_256_get_point_33_avx2_4_start:
vpcmpeqd %ymm7, %ymm8, %ymm6
vpaddd %ymm9, %ymm8, %ymm8
vmovupd (%rsi), %ymm3
vmovupd 64(%rsi), %ymm4
vmovupd 128(%rsi), %ymm5
addq $0xc8, %rsi
vpand %ymm6, %ymm3, %ymm3
vpand %ymm6, %ymm4, %ymm4
vpand %ymm6, %ymm5, %ymm5
vpor %ymm3, %ymm0, %ymm0
vpor %ymm4, %ymm1, %ymm1
vpor %ymm5, %ymm2, %ymm2
decq %rax
jnz L_256_get_point_33_avx2_4_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 64(%rdi)
vmovupd %ymm2, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_point_33_avx2_4,.-sp_256_get_point_33_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Multiply two Montgomery form numbers mod the modulus (prime).
* (r = a * b mod m)
*
* r Result of multiplication.
* a First number to multiply in Montgomery form.
* b Second number to multiply in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_mul_avx2_4
.type sp_256_mont_mul_avx2_4,@function
.align 16
sp_256_mont_mul_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_mul_avx2_4
.p2align 4
_sp_256_mont_mul_avx2_4:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rbp
movq (%rsi), %rdx
movq 8(%rbp), %r14
# A[0] * B[0]
mulxq (%rbp), %r8, %r9
xorq %rbx, %rbx
# A[0] * B[1]
mulxq %r14, %rax, %r10
adcxq %rax, %r9
# A[0] * B[2]
mulxq 16(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[3]
mulxq 24(%rbp), %rax, %r12
adcxq %rax, %r11
movq 8(%rsi), %rdx
adcxq %rbx, %r12
# A[1] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r9
# A[1] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r10
adcxq %rax, %r10
# A[1] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r11
adcxq %rax, %r11
# A[1] * B[3]
mulxq 24(%rbp), %rax, %r13
adoxq %rcx, %r12
adcxq %rax, %r12
adoxq %rbx, %r13
movq 16(%rsi), %rdx
adcxq %rbx, %r13
# A[2] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r10
# A[2] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r11
adcxq %rax, %r11
# A[2] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r12
adcxq %rax, %r12
# A[2] * B[3]
mulxq 24(%rbp), %rax, %r14
adoxq %rcx, %r13
adcxq %rax, %r13
adoxq %rbx, %r14
movq 24(%rsi), %rdx
adcxq %rbx, %r14
# A[3] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r11
# A[3] * B[1]
mulxq 8(%rbp), %rax, %r15
adoxq %rcx, %r12
adcxq %rax, %r12
# A[3] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r13
adcxq %rax, %r13
# A[3] * B[3]
mulxq 24(%rbp), %rax, %r15
adoxq %rcx, %r14
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbx, %r15
# Start Reduction
# mu = a[0]-a[3] + a[0]-a[2] << 32 << 64 + (a[0] * 2) << 192
# - a[0] << 32 << 192
# a[0]-a[3] + (a[0] * 2) << 192
movq %r8, %rax
leaq (%r11,%r8,2), %rdx
movq %r9, %rsi
movq %r10, %rbp
movq %r10, %rcx
# a[0]-a[2] << 32
shlq $32, %r8
shldq $32, %rsi, %rcx
shldq $32, %rax, %r9
# - a[0] << 32 << 192
subq %r8, %rdx
# + a[0]-a[2] << 32 << 64
addq %r8, %rsi
adcq %r9, %rbp
adcq %rcx, %rdx
# a += (mu << 256) - (mu << 224) + (mu << 192) + (mu << 96) - mu
xorq %rcx, %rcx
# a += mu << 256
addq %rax, %r12
adcq %rsi, %r13
adcq %rbp, %r14
adcq %rdx, %r15
sbbq %r8, %r8
# a += mu << 192
addq %rax, %r11
adcq %rsi, %r12
movq %rsi, %r9
adcq %rbp, %r13
adcq %rdx, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
# mu <<= 32
shldq $32, %rdx, %rcx
shldq $32, %rbp, %rdx
shldq $32, %rsi, %rbp
shldq $32, %rax, %rsi
shlq $32, %rax
# a -= (mu << 32) << 192
subq %rax, %r11
sbbq %rsi, %r12
sbbq %rbp, %r13
sbbq %rdx, %r14
sbbq %rcx, %r15
adcq $0x00, %r8
# a += (mu << 32) << 64
subq %rax, %r9
adcq %rsi, %r10
adcq %rbp, %r11
adcq %rdx, %r12
adcq %rcx, %r13
adcq $0x00, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
movq $0xffffffff00000001, %rsi
# mask m and sub from result if overflow
# m[0] = -1 & mask = mask
# m[2] = 0 & mask = 0
movl %r8d, %eax
andq %r8, %rsi
subq %r8, %r12
sbbq %rax, %r13
movq %r12, (%rdi)
sbbq $0x00, %r14
movq %r13, 8(%rdi)
sbbq %rsi, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_256_mont_mul_avx2_4,.-sp_256_mont_mul_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Square the Montgomery form number mod the modulus (prime). (r = a * a mod m)
*
* r Result of squaring.
* a Number to square in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_sqr_avx2_4
.type sp_256_mont_sqr_avx2_4,@function
.align 16
sp_256_mont_sqr_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_sqr_avx2_4
.p2align 4
_sp_256_mont_sqr_avx2_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
xorq %r8, %r8
movq (%rsi), %rdx
movq 8(%rsi), %rcx
movq 16(%rsi), %rbx
movq 24(%rsi), %r15
# A[0] * A[1]
mulxq %rcx, %r9, %r10
# A[0] * A[2]
mulxq %rbx, %r8, %r11
adoxq %r8, %r10
# A[0] * A[3]
mulxq %r15, %r8, %r12
movq %rcx, %rdx
adoxq %r8, %r11
# A[1] * A[2]
mulxq %rbx, %r8, %rax
movq %r15, %rdx
adcxq %r8, %r11
# A[1] * A[3]
mulxq %rcx, %r8, %r13
movq $0x00, %r15
adoxq %rax, %r12
adcxq %r8, %r12
# A[2] * A[3]
mulxq %rbx, %r8, %r14
adoxq %r15, %r13
adcxq %r8, %r13
adoxq %r15, %r14
adcxq %r15, %r14
# Double with Carry Flag
xorq %r15, %r15
# A[0] * A[0]
movq (%rsi), %rdx
mulxq %rdx, %r8, %rax
adcxq %r9, %r9
adcxq %r10, %r10
adoxq %rax, %r9
# A[1] * A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r10
# A[2] * A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adcxq %r12, %r12
adoxq %rbx, %r11
adcxq %r13, %r13
adoxq %rax, %r12
adcxq %r14, %r14
# A[3] * A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rbx
adoxq %rcx, %r13
adcxq %r15, %r15
adoxq %rax, %r14
adoxq %rbx, %r15
# Start Reduction
# mu = a[0]-a[3] + a[0]-a[2] << 32 << 64 + (a[0] * 2) << 192
# - a[0] << 32 << 192
# a[0]-a[3] + (a[0] * 2) << 192
movq %r8, %rax
leaq (%r11,%r8,2), %rdx
movq %r9, %rsi
movq %r10, %rcx
movq %r10, %rbx
# a[0]-a[2] << 32
shlq $32, %r8
shldq $32, %rsi, %rbx
shldq $32, %rax, %r9
# - a[0] << 32 << 192
subq %r8, %rdx
# + a[0]-a[2] << 32 << 64
addq %r8, %rsi
adcq %r9, %rcx
adcq %rbx, %rdx
# a += (mu << 256) - (mu << 224) + (mu << 192) + (mu << 96) - mu
xorq %rbx, %rbx
# a += mu << 256
addq %rax, %r12
adcq %rsi, %r13
adcq %rcx, %r14
adcq %rdx, %r15
sbbq %r8, %r8
# a += mu << 192
addq %rax, %r11
adcq %rsi, %r12
movq %rsi, %r9
adcq %rcx, %r13
adcq %rdx, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
# mu <<= 32
shldq $32, %rdx, %rbx
shldq $32, %rcx, %rdx
shldq $32, %rsi, %rcx
shldq $32, %rax, %rsi
shlq $32, %rax
# a -= (mu << 32) << 192
subq %rax, %r11
sbbq %rsi, %r12
sbbq %rcx, %r13
sbbq %rdx, %r14
sbbq %rbx, %r15
adcq $0x00, %r8
# a += (mu << 32) << 64
subq %rax, %r9
adcq %rsi, %r10
adcq %rcx, %r11
adcq %rdx, %r12
adcq %rbx, %r13
adcq $0x00, %r14
adcq $0x00, %r15
sbbq $0x00, %r8
movq $0xffffffff00000001, %rsi
# mask m and sub from result if overflow
# m[0] = -1 & mask = mask
# m[2] = 0 & mask = 0
movl %r8d, %eax
andq %r8, %rsi
subq %r8, %r12
sbbq %rax, %r13
movq %r12, (%rdi)
sbbq $0x00, %r14
movq %r13, 8(%rdi)
sbbq %rsi, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_sqr_avx2_4,.-sp_256_mont_sqr_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_256_cond_sub_avx2_4
.type sp_256_cond_sub_avx2_4,@function
.align 16
sp_256_cond_sub_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_cond_sub_avx2_4
.p2align 4
_sp_256_cond_sub_avx2_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rdx), %r12
movq 8(%rdx), %r13
movq 16(%rdx), %r14
movq 24(%rdx), %r15
andq %rcx, %r12
andq %rcx, %r13
andq %rcx, %r14
andq %rcx, %r15
movq (%rsi), %r8
movq 8(%rsi), %r9
movq 16(%rsi), %r10
movq 24(%rsi), %r11
subq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
sbbq %r15, %r11
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
sbbq %rax, %rax
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_cond_sub_avx2_4,.-sp_256_cond_sub_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 256 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_reduce_order_avx2_4
.type sp_256_mont_reduce_order_avx2_4,@function
.align 16
sp_256_mont_reduce_order_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_reduce_order_avx2_4
.p2align 4
_sp_256_mont_reduce_order_avx2_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rax
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
xorq %r11, %r11
xorq %r10, %r10
# a[0-4] += m[0-3] * mu = m[0-3] * (a[0] * mp)
movq 32(%rdi), %rbx
# mu = a[0] * mp
movq %r12, %rdx
mulxq %rax, %rdx, %rcx
# a[0] += m[0] * mu
mulx (%rsi), %r8, %r9
adcxq %r8, %r12
# a[1] += m[1] * mu
mulx 8(%rsi), %r8, %rcx
adoxq %r9, %r13
adcxq %r8, %r13
# a[2] += m[2] * mu
mulx 16(%rsi), %r8, %r9
adoxq %rcx, %r14
adcxq %r8, %r14
# a[3] += m[3] * mu
mulx 24(%rsi), %r8, %rcx
adoxq %r9, %r15
adcxq %r8, %r15
# a[4] += carry
adoxq %rcx, %rbx
adcxq %r10, %rbx
# carry
adoxq %r10, %r11
adcxq %r10, %r11
# a[1-5] += m[0-3] * mu = m[0-3] * (a[1] * mp)
movq 40(%rdi), %r12
# mu = a[1] * mp
movq %r13, %rdx
mulxq %rax, %rdx, %rcx
# a[1] += m[0] * mu
mulx (%rsi), %r8, %r9
adcxq %r8, %r13
# a[2] += m[1] * mu
mulx 8(%rsi), %r8, %rcx
adoxq %r9, %r14
adcxq %r8, %r14
# a[3] += m[2] * mu
mulx 16(%rsi), %r8, %r9
adoxq %rcx, %r15
adcxq %r8, %r15
# a[4] += m[3] * mu
mulx 24(%rsi), %r8, %rcx
adoxq %r9, %rbx
adcxq %r8, %rbx
# a[5] += carry
adoxq %rcx, %r12
adcxq %r11, %r12
movq %r10, %r11
# carry
adoxq %r10, %r11
adcxq %r10, %r11
# a[2-6] += m[0-3] * mu = m[0-3] * (a[2] * mp)
movq 48(%rdi), %r13
# mu = a[2] * mp
movq %r14, %rdx
mulxq %rax, %rdx, %rcx
# a[2] += m[0] * mu
mulx (%rsi), %r8, %r9
adcxq %r8, %r14
# a[3] += m[1] * mu
mulx 8(%rsi), %r8, %rcx
adoxq %r9, %r15
adcxq %r8, %r15
# a[4] += m[2] * mu
mulx 16(%rsi), %r8, %r9
adoxq %rcx, %rbx
adcxq %r8, %rbx
# a[5] += m[3] * mu
mulx 24(%rsi), %r8, %rcx
adoxq %r9, %r12
adcxq %r8, %r12
# a[6] += carry
adoxq %rcx, %r13
adcxq %r11, %r13
movq %r10, %r11
# carry
adoxq %r10, %r11
adcxq %r10, %r11
# a[3-7] += m[0-3] * mu = m[0-3] * (a[3] * mp)
movq 56(%rdi), %r14
# mu = a[3] * mp
movq %r15, %rdx
mulxq %rax, %rdx, %rcx
# a[3] += m[0] * mu
mulx (%rsi), %r8, %r9
adcxq %r8, %r15
# a[4] += m[1] * mu
mulx 8(%rsi), %r8, %rcx
adoxq %r9, %rbx
adcxq %r8, %rbx
# a[5] += m[2] * mu
mulx 16(%rsi), %r8, %r9
adoxq %rcx, %r12
adcxq %r8, %r12
# a[6] += m[3] * mu
mulx 24(%rsi), %r8, %rcx
adoxq %r9, %r13
adcxq %r8, %r13
# a[7] += carry
adoxq %rcx, %r14
adcxq %r11, %r14
movq %r10, %r11
# carry
adoxq %r10, %r11
adcxq %r10, %r11
# Subtract mod if carry
negq %r11
movq $0xf3b9cac2fc632551, %r8
movq $0xbce6faada7179e84, %r9
movq $0xffffffff00000000, %rdx
andq %r11, %r8
andq %r11, %r9
andq %r11, %rdx
subq %r8, %rbx
sbbq %r9, %r12
sbbq %r11, %r13
sbbq %rdx, %r14
movq %rbx, (%rdi)
movq %r12, 8(%rdi)
movq %r13, 16(%rdi)
movq %r14, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mont_reduce_order_avx2_4,.-sp_256_mont_reduce_order_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_div2_avx2_4
.type sp_256_mont_div2_avx2_4,@function
.align 16
sp_256_mont_div2_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_div2_avx2_4
.p2align 4
_sp_256_mont_div2_avx2_4:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq $0xffffffff00000001, %r10
movq %rdx, %r11
andq $0x01, %r11
negq %r11
movl %r11d, %r9d
andq %r11, %r10
addq %r11, %rdx
adcq %r9, %rax
adcq $0x00, %rcx
adcq %r10, %r8
movq $0x00, %r11
adcq $0x00, %r11
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r11, %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mont_div2_avx2_4,.-sp_256_mont_div2_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch every possible entry that could be copied, so the memory access
 * pattern does not depend on idx.
 *
 * r Entry to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_256_get_entry_64_4
.type sp_256_get_entry_64_4,@function
.align 16
sp_256_get_entry_64_4:
#else
.section __TEXT,__text
.globl _sp_256_get_entry_64_4
.p2align 4
_sp_256_get_entry_64_4:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %rax
movd %edx, %xmm9
addq $0x40, %rsi
movd %eax, %xmm11
movq $63, %rax
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm9, %xmm9
pxor %xmm10, %xmm10
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
movdqa %xmm11, %xmm10
L_256_get_entry_64_4_start_0:
movdqa %xmm10, %xmm8
paddd %xmm11, %xmm10
pcmpeqd %xmm9, %xmm8
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
addq $0x40, %rsi
pand %xmm8, %xmm4
pand %xmm8, %xmm5
pand %xmm8, %xmm6
pand %xmm8, %xmm7
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
decq %rax
jnz L_256_get_entry_64_4_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 64(%rdi)
movdqu %xmm3, 80(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_entry_64_4,.-sp_256_get_entry_64_4
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch every possible entry that could be copied, so the memory access
 * pattern does not depend on idx.
 *
 * r Entry to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_256_get_entry_64_avx2_4
.type sp_256_get_entry_64_avx2_4,@function
.align 16
sp_256_get_entry_64_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_get_entry_64_avx2_4
.p2align 4
_sp_256_get_entry_64_avx2_4:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm5
addq $0x40, %rsi
movd %eax, %xmm7
movq $0x40, %rax
vpxor %ymm6, %ymm6, %ymm6
vpermd %ymm5, %ymm6, %ymm5
vpermd %ymm7, %ymm6, %ymm7
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vmovdqa %ymm7, %ymm6
L_256_get_entry_64_avx2_4_start:
vpcmpeqd %ymm5, %ymm6, %ymm4
vpaddd %ymm7, %ymm6, %ymm6
vmovupd (%rsi), %ymm2
vmovupd 32(%rsi), %ymm3
addq $0x40, %rsi
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpor %ymm2, %ymm0, %ymm0
vpor %ymm3, %ymm1, %ymm1
decq %rax
jnz L_256_get_entry_64_avx2_4_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_entry_64_avx2_4,.-sp_256_get_entry_64_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch every possible entry that could be copied, so the memory access
 * pattern does not depend on idx.
 *
 * r Entry to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_256_get_entry_65_4
.type sp_256_get_entry_65_4,@function
.align 16
sp_256_get_entry_65_4:
#else
.section __TEXT,__text
.globl _sp_256_get_entry_65_4
.p2align 4
_sp_256_get_entry_65_4:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %rax
movd %edx, %xmm9
addq $0x40, %rsi
movd %eax, %xmm11
movq $0x40, %rax
pshufd $0x00, %xmm11, %xmm11
pshufd $0x00, %xmm9, %xmm9
pxor %xmm10, %xmm10
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
movdqa %xmm11, %xmm10
L_256_get_entry_65_4_start_0:
movdqa %xmm10, %xmm8
paddd %xmm11, %xmm10
pcmpeqd %xmm9, %xmm8
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
addq $0x40, %rsi
pand %xmm8, %xmm4
pand %xmm8, %xmm5
pand %xmm8, %xmm6
pand %xmm8, %xmm7
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
decq %rax
jnz L_256_get_entry_65_4_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 64(%rdi)
movdqu %xmm3, 80(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_entry_65_4,.-sp_256_get_entry_65_4
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible entry so that the memory access pattern does not
 * depend on the index of the entry being retrieved (cache-attack
 * resistant).
 *
 * r Point to copy the selected entry into.
 * table Table - start of the entries to access.
 * idx Index of the entry to retrieve.
 */
#ifndef __APPLE__
.text
.globl sp_256_get_entry_65_avx2_4
.type sp_256_get_entry_65_avx2_4,@function
.align 16
sp_256_get_entry_65_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_get_entry_65_avx2_4
.p2align 4
_sp_256_get_entry_65_avx2_4:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm5
addq $0x40, %rsi
movd %eax, %xmm7
movq $0x40, %rax
vpxor %ymm6, %ymm6, %ymm6
vpermd %ymm5, %ymm6, %ymm5
vpermd %ymm7, %ymm6, %ymm7
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vmovdqa %ymm7, %ymm6
L_256_get_entry_65_avx2_4_start:
vpcmpeqd %ymm5, %ymm6, %ymm4
vpaddd %ymm7, %ymm6, %ymm6
vmovupd (%rsi), %ymm2
vmovupd 32(%rsi), %ymm3
addq $0x40, %rsi
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpor %ymm2, %ymm0, %ymm0
vpor %ymm3, %ymm1, %ymm1
decq %rax
jnz L_256_get_entry_65_avx2_4_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_get_entry_65_avx2_4,.-sp_256_get_entry_65_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Add 1 to a. (a = a + 1)
*
* a A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_256_add_one_4
.type sp_256_add_one_4,@function
.align 16
sp_256_add_one_4:
#else
.section __TEXT,__text
.globl _sp_256_add_one_4
.p2align 4
_sp_256_add_one_4:
#endif /* __APPLE__ */
addq $0x01, (%rdi)
adcq $0x00, 8(%rdi)
adcq $0x00, 16(%rdi)
adcq $0x00, 24(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_add_one_4,.-sp_256_add_one_4
#endif /* __APPLE__ */
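/* In C terms the increment above is (sketch; helper name hypothetical):
 *
 *     #include <stdint.h>
 *
 *     static void add_one_4_c(uint64_t a[4])
 *     {
 *         unsigned __int128 t = (unsigned __int128)a[0] + 1;
 *         a[0] = (uint64_t)t;
 *         for (int i = 1; i < 4; i++) {
 *             // t >> 64 is the carry out of the previous word (adcq)
 *             t = (unsigned __int128)a[i] + (uint64_t)(t >> 64);
 *             a[i] = (uint64_t)t;
 *         }
 *     }
 */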
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
* size Maximum number of bytes to convert
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_256_from_bin_bswap
.type sp_256_from_bin_bswap,@function
.align 16
sp_256_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_256_from_bin_bswap
.p2align 4
_sp_256_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $32, %r10
xorq %r11, %r11
jmp L_256_from_bin_bswap_64_end
L_256_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_256_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_256_from_bin_bswap_64_start
jmp L_256_from_bin_bswap_8_end
L_256_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_256_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_256_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_256_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_256_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_256_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_256_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_256_from_bin_bswap_zero_end
L_256_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_256_from_bin_bswap_zero_start
L_256_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_256_from_bin_bswap,.-sp_256_from_bin_bswap
#endif /* __APPLE__ */
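/* A C sketch of the conversion above (illustrative; name hypothetical;
 * assumes n <= 32 as the callers guarantee). Full words are read from
 * the tail of the big-endian array, a partial top word byte by byte,
 * and the remaining words zeroed:
 *
 *     #include <stdint.h>
 *     #include <stddef.h>
 *
 *     static void from_bin_c(uint64_t r[4], const uint8_t *a, size_t n)
 *     {
 *         size_t i = 0, rem = n;
 *         while (rem >= 8) {                  // 8-byte load + bswap
 *             uint64_t w = 0;
 *             for (int k = 0; k < 8; k++)
 *                 w = (w << 8) | a[rem - 8 + k];
 *             r[i++] = w;
 *             rem -= 8;
 *         }
 *         if (rem > 0) {                      // leftover high bytes
 *             uint64_t w = 0;
 *             for (size_t k = 0; k < rem; k++)
 *                 w = (w << 8) | a[k];
 *             r[i++] = w;
 *         }
 *         while (i < 4)
 *             r[i++] = 0;                     // zero the rest
 *     }
 */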
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
 * Uses the movbe instruction, which is an optional instruction.
*
* r A single precision integer.
* size Maximum number of bytes to convert
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_256_from_bin_movbe
.type sp_256_from_bin_movbe,@function
.align 16
sp_256_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_256_from_bin_movbe
.p2align 4
_sp_256_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $32, %r10
jmp L_256_from_bin_movbe_64_end
L_256_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_256_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_256_from_bin_movbe_64_start
jmp L_256_from_bin_movbe_8_end
L_256_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_256_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_256_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_256_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_256_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_256_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_256_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_256_from_bin_movbe_zero_end
L_256_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_256_from_bin_movbe_zero_start
L_256_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_256_from_bin_movbe,.-sp_256_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 32
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_256_to_bin_bswap_4
.type sp_256_to_bin_bswap_4,@function
.align 16
sp_256_to_bin_bswap_4:
#else
.section __TEXT,__text
.globl _sp_256_to_bin_bswap_4
.p2align 4
_sp_256_to_bin_bswap_4:
#endif /* __APPLE__ */
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
repz retq
#ifndef __APPLE__
.size sp_256_to_bin_bswap_4,.-sp_256_to_bin_bswap_4
#endif /* __APPLE__ */
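/* In C terms the store above is (sketch; name hypothetical):
 *
 *     #include <stdint.h>
 *
 *     static void to_bin_4_c(uint8_t a[32], const uint64_t r[4])
 *     {
 *         // r[3] is the most significant word: bswap then store
 *         for (int i = 0; i < 4; i++)
 *             for (int k = 0; k < 8; k++)
 *                 a[8 * i + k] = (uint8_t)(r[3 - i] >> (56 - 8 * k));
 *     }
 */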
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 32
 * Uses the movbe instruction, which is an optional instruction.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_256_to_bin_movbe_4
.type sp_256_to_bin_movbe_4,@function
.align 16
sp_256_to_bin_movbe_4:
#else
.section __TEXT,__text
.globl _sp_256_to_bin_movbe_4
.p2align 4
_sp_256_to_bin_movbe_4:
#endif /* __APPLE__ */
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
repz retq
#ifndef __APPLE__
.size sp_256_to_bin_movbe_4,.-sp_256_to_bin_movbe_4
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Sub b from a into a. (a -= b)
*
* a A single precision integer and result.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_256_sub_in_place_4
.type sp_256_sub_in_place_4,@function
.align 16
sp_256_sub_in_place_4:
#else
.section __TEXT,__text
.globl _sp_256_sub_in_place_4
.p2align 4
_sp_256_sub_in_place_4:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
subq %rdx, (%rdi)
sbbq %rcx, 8(%rdi)
sbbq %r8, 16(%rdi)
sbbq %r9, 24(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_256_sub_in_place_4,.-sp_256_sub_in_place_4
#endif /* __APPLE__ */
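/* The subq/sbbq chain above maps to this C sketch (name hypothetical);
 * the return value mirrors the final sbbq %rax, %rax, i.e. 0 or all
 * ones on borrow:
 *
 *     #include <stdint.h>
 *
 *     static uint64_t sub_in_place_4_c(uint64_t a[4], const uint64_t b[4])
 *     {
 *         __int128 t = 0;
 *         for (int i = 0; i < 4; i++) {
 *             t += (__int128)a[i] - b[i];
 *             a[i] = (uint64_t)t;
 *             t >>= 64;                   // arithmetic shift: 0 or -1
 *         }
 *         return (uint64_t)t;
 *     }
 */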
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
#ifndef __APPLE__
.text
.globl sp_256_mul_d_4
.type sp_256_mul_d_4,@function
.align 16
sp_256_mul_d_4:
#else
.section __TEXT,__text
.globl _sp_256_mul_d_4
.p2align 4
_sp_256_mul_d_4:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mul_d_4,.-sp_256_mul_d_4
#endif /* __APPLE__ */
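/* A C sketch of the digit multiply above (name hypothetical); note the
 * result needs 5 words:
 *
 *     #include <stdint.h>
 *
 *     static void mul_d_4_c(uint64_t r[5], const uint64_t a[4], uint64_t b)
 *     {
 *         uint64_t carry = 0;
 *         for (int i = 0; i < 4; i++) {
 *             unsigned __int128 t = (unsigned __int128)a[i] * b + carry;
 *             r[i] = (uint64_t)t;             // low half of the product
 *             carry = (uint64_t)(t >> 64);    // high half plus carry
 *         }
 *         r[4] = carry;
 *     }
 */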
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
#ifndef __APPLE__
.text
.globl sp_256_mul_d_avx2_4
.type sp_256_mul_d_avx2_4,@function
.align 16
sp_256_mul_d_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mul_d_avx2_4
.p2align 4
_sp_256_mul_d_avx2_4:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 24(%rdi)
movq %r9, 32(%rdi)
repz retq
#ifndef __APPLE__
.size sp_256_mul_d_avx2_4,.-sp_256_mul_d_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
 * returns the quotient of the division.
*/
#ifndef __APPLE__
.text
.globl div_256_word_asm_4
.type div_256_word_asm_4,@function
.align 16
div_256_word_asm_4:
#else
.section __TEXT,__text
.globl _div_256_word_asm_4
.p2align 4
_div_256_word_asm_4:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_256_word_asm_4,.-div_256_word_asm_4
#endif /* __APPLE__ */
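/* divq performs a 128-by-64-bit division; in C (sketch, assuming the
 * quotient fits in 64 bits, which the callers must guarantee):
 *
 *     #include <stdint.h>
 *
 *     static uint64_t div_word_c(uint64_t d1, uint64_t d0, uint64_t div)
 *     {
 *         return (uint64_t)((((unsigned __int128)d1 << 64) | d0) / div);
 *     }
 */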
#endif /* _WIN64 */
#ifdef HAVE_INTEL_AVX2
/* Multiply two Montgomery form numbers mod the order of the P256 curve.
 * (r = a * b mod order)
*
* r Result of multiplication.
* a First number to multiply in Montgomery form.
* b Second number to multiply in Montgomery form.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_mul_order_avx2_4
.type sp_256_mont_mul_order_avx2_4,@function
.align 16
sp_256_mont_mul_order_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_mul_order_avx2_4
.p2align 4
_sp_256_mont_mul_order_avx2_4:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rbp
movq (%rsi), %rdx
movq 8(%rbp), %r14
# A[0] * B[0]
mulxq (%rbp), %r8, %r9
xorq %rbx, %rbx
# A[0] * B[1]
mulxq %r14, %rax, %r10
adcxq %rax, %r9
# A[0] * B[2]
mulxq 16(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[3]
mulxq 24(%rbp), %rax, %r12
adcxq %rax, %r11
movq 8(%rsi), %rdx
adcxq %rbx, %r12
# A[1] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r9
# A[1] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r10
adcxq %rax, %r10
# A[1] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r11
adcxq %rax, %r11
# A[1] * B[3]
mulxq 24(%rbp), %rax, %r13
adoxq %rcx, %r12
adcxq %rax, %r12
adoxq %rbx, %r13
movq 16(%rsi), %rdx
adcxq %rbx, %r13
# A[2] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r10
# A[2] * B[1]
mulxq %r14, %rax, %r15
adoxq %rcx, %r11
adcxq %rax, %r11
# A[2] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r12
adcxq %rax, %r12
# A[2] * B[3]
mulxq 24(%rbp), %rax, %r14
adoxq %rcx, %r13
adcxq %rax, %r13
adoxq %rbx, %r14
movq 24(%rsi), %rdx
adcxq %rbx, %r14
# A[3] * B[0]
mulxq (%rbp), %rax, %rcx
xorq %rbx, %rbx
adcxq %rax, %r11
# A[3] * B[1]
mulxq 8(%rbp), %rax, %r15
adoxq %rcx, %r12
adcxq %rax, %r12
# A[3] * B[2]
mulxq 16(%rbp), %rax, %rcx
adoxq %r15, %r13
adcxq %rax, %r13
# A[3] * B[3]
mulxq 24(%rbp), %rax, %r15
adoxq %rcx, %r14
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbx, %r15
# Start Reduction
movq $0xccd1c8aaee00bc4f, %rbx
# A[0]
movq %rbx, %rdx
imulq %r8, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r8
adoxq %rsi, %r9
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r9
adoxq %rsi, %r10
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r11
adoxq %rsi, %r12
adcxq %rbp, %r12
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[1]
movq %rbx, %rdx
imulq %r9, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r9
adoxq %rsi, %r10
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r12
adoxq %rsi, %r13
adcxq %r8, %r13
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[2]
movq %rbx, %rdx
imulq %r10, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r12
adoxq %rsi, %r13
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r13
adoxq %rsi, %r14
adcxq %r8, %r14
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[3]
movq %rbx, %rdx
imulq %r11, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r12
adoxq %rsi, %r13
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r13
adoxq %rsi, %r14
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r14
adoxq %rsi, %r15
adcxq %r8, %r15
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
negq %r8
movq $0xf3b9cac2fc632551, %rax
movq $0xbce6faada7179e84, %rbx
andq %r8, %rax
movq $0xffffffff00000000, %rbp
andq %r8, %rbx
andq %r8, %rbp
subq %rax, %r12
sbbq %rbx, %r13
movq %r12, (%rdi)
sbbq %r8, %r14
movq %r13, 8(%rdi)
sbbq %rbp, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_256_mont_mul_order_avx2_4,.-sp_256_mont_mul_order_avx2_4
#endif /* __APPLE__ */
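/* The reduction phase above is word-wise Montgomery reduction with
 * mp = -1/order mod 2^64 = 0xccd1c8aaee00bc4f. A C sketch of that phase
 * (illustrative; names hypothetical; t holds the 8-word product, and
 * the result, like the asm's, may still be one subtraction away from
 * fully reduced):
 *
 *     #include <stdint.h>
 *
 *     static void mont_reduce_4_c(uint64_t r[4], uint64_t t[8],
 *                                 const uint64_t m[4], uint64_t mp)
 *     {
 *         uint64_t over = 0;
 *         for (int i = 0; i < 4; i++) {
 *             uint64_t mu = t[i] * mp;    // chosen so t[i] becomes zero
 *             uint64_t c = 0;
 *             for (int j = 0; j < 4; j++) {
 *                 unsigned __int128 p =
 *                     (unsigned __int128)mu * m[j] + t[i + j] + c;
 *                 t[i + j] = (uint64_t)p;
 *                 c = (uint64_t)(p >> 64);
 *             }
 *             for (int j = i + 4; j < 8; j++) {   // ripple the carry
 *                 unsigned __int128 s = (unsigned __int128)t[j] + c;
 *                 t[j] = (uint64_t)s;
 *                 c = (uint64_t)(s >> 64);
 *             }
 *             over |= c;                  // at most one step carries out
 *         }
 *         // subtract m once under a mask (negq/andq/subq chain above)
 *         uint64_t mask = (uint64_t)0 - over;
 *         __int128 b = 0;
 *         for (int i = 0; i < 4; i++) {
 *             b += (__int128)t[4 + i] - (m[i] & mask);
 *             r[i] = (uint64_t)b;
 *             b >>= 64;
 *         }
 *     }
 */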
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Square the Montgomery form number mod the order of the P256 curve.
 * (r = a * a mod order)
*
* r Result of squaring.
* a Number to square in Montgomery form.
*/
#ifndef __APPLE__
.text
.globl sp_256_mont_sqr_order_avx2_4
.type sp_256_mont_sqr_order_avx2_4,@function
.align 16
sp_256_mont_sqr_order_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mont_sqr_order_avx2_4
.p2align 4
_sp_256_mont_sqr_order_avx2_4:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
xorq %r8, %r8
movq (%rsi), %rdx
movq 8(%rsi), %rcx
movq 16(%rsi), %rbx
movq 24(%rsi), %r15
# A[0] * A[1]
mulxq %rcx, %r9, %r10
# A[0] * A[2]
mulxq %rbx, %r8, %r11
adoxq %r8, %r10
# A[0] * A[3]
mulxq %r15, %r8, %r12
movq %rcx, %rdx
adoxq %r8, %r11
# A[1] * A[2]
mulxq %rbx, %r8, %rax
movq %r15, %rdx
adcxq %r8, %r11
# A[1] * A[3]
mulxq %rcx, %r8, %r13
movq $0x00, %r15
adoxq %rax, %r12
adcxq %r8, %r12
# A[2] * A[3]
mulxq %rbx, %r8, %r14
adoxq %r15, %r13
adcxq %r8, %r13
adoxq %r15, %r14
adcxq %r15, %r14
# Double with Carry Flag
xorq %r15, %r15
# A[0] * A[0]
movq (%rsi), %rdx
mulxq %rdx, %r8, %rax
adcxq %r9, %r9
adcxq %r10, %r10
adoxq %rax, %r9
# A[1] * A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rcx, %rbx
adcxq %r11, %r11
adoxq %rcx, %r10
# A[2] * A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adcxq %r12, %r12
adoxq %rbx, %r11
adcxq %r13, %r13
adoxq %rax, %r12
adcxq %r14, %r14
# A[3] * A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rbx
adoxq %rcx, %r13
adcxq %r15, %r15
adoxq %rax, %r14
adoxq %rbx, %r15
# Start Reduction
movq $0xccd1c8aaee00bc4f, %rbx
# A[0]
movq %rbx, %rdx
imulq %r8, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r8
adoxq %rsi, %r9
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r9
adoxq %rsi, %r10
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r11
adoxq %rsi, %r12
adcxq %rbp, %r12
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[1]
movq %rbx, %rdx
imulq %r9, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r9
adoxq %rsi, %r10
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r12
adoxq %rsi, %r13
adcxq %r8, %r13
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[2]
movq %rbx, %rdx
imulq %r10, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r10
adoxq %rsi, %r11
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r12
adoxq %rsi, %r13
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r13
adoxq %rsi, %r14
adcxq %r8, %r14
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
# A[3]
movq %rbx, %rdx
imulq %r11, %rdx
movq $0xf3b9cac2fc632551, %rax
xorq %rbp, %rbp
mulxq %rax, %rcx, %rsi
movq $0xbce6faada7179e84, %rax
adcxq %rcx, %r11
adoxq %rsi, %r12
mulxq %rax, %rcx, %rsi
movq $0xffffffffffffffff, %rax
adcxq %rcx, %r12
adoxq %rsi, %r13
mulxq %rax, %rcx, %rsi
movq $0xffffffff00000000, %rax
adcxq %rcx, %r13
adoxq %rsi, %r14
mulxq %rax, %rcx, %rsi
adcxq %rcx, %r14
adoxq %rsi, %r15
adcxq %r8, %r15
movq %rbp, %r8
# carry
adoxq %rbp, %r8
adcxq %rbp, %r8
negq %r8
movq $0xf3b9cac2fc632551, %rax
movq $0xbce6faada7179e84, %rbx
andq %r8, %rax
movq $0xffffffff00000000, %rbp
andq %r8, %rbx
andq %r8, %rbp
subq %rax, %r12
sbbq %rbx, %r13
movq %r12, (%rdi)
sbbq %r8, %r14
movq %r13, 8(%rdi)
sbbq %rbp, %r15
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_256_mont_sqr_order_avx2_4,.-sp_256_mont_sqr_order_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Non-constant time modular inversion.
*
* @param [out] r Resulting number.
* @param [in] a Number to invert.
* @param [in] m Modulus.
* @return MP_OKAY on success.
*/
#ifndef __APPLE__
.text
.globl sp_256_mod_inv_4
.type sp_256_mod_inv_4,@function
.align 16
sp_256_mod_inv_4:
#else
.section __TEXT,__text
.globl _sp_256_mod_inv_4
.p2align 4
_sp_256_mod_inv_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x201, %rsp
movq (%rdx), %rcx
movq 8(%rdx), %r8
movq 16(%rdx), %r9
movq 24(%rdx), %r10
movq (%rsi), %r11
movq 8(%rsi), %r12
movq 16(%rsi), %r13
movq 24(%rsi), %r14
movq $0x00, %r15
testb $0x01, %r11b
jnz L_256_mod_inv_4_v_even_end
L_256_mod_inv_4_v_even_start:
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrdq $0x01, %r14, %r13
shrq $0x01, %r14
movb $0x01, (%rsp,%r15,1)
incq %r15
testb $0x01, %r11b
jz L_256_mod_inv_4_v_even_start
L_256_mod_inv_4_v_even_end:
L_256_mod_inv_4_uv_start:
cmpq %r14, %r10
jb L_256_mod_inv_4_uv_v
ja L_256_mod_inv_4_uv_u
cmpq %r13, %r9
jb L_256_mod_inv_4_uv_v
ja L_256_mod_inv_4_uv_u
cmpq %r12, %r8
jb L_256_mod_inv_4_uv_v
ja L_256_mod_inv_4_uv_u
cmpq %r11, %rcx
jb L_256_mod_inv_4_uv_v
L_256_mod_inv_4_uv_u:
movb $2, (%rsp,%r15,1)
incq %r15
subq %r11, %rcx
sbbq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrq $0x01, %r10
testb $0x01, %cl
jnz L_256_mod_inv_4_usubv_even_end
L_256_mod_inv_4_usubv_even_start:
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrq $0x01, %r10
movb $0x00, (%rsp,%r15,1)
incq %r15
testb $0x01, %cl
jz L_256_mod_inv_4_usubv_even_start
L_256_mod_inv_4_usubv_even_end:
cmpq $0x01, %rcx
jne L_256_mod_inv_4_uv_start
movq %r8, %rsi
orq %r9, %rsi
jne L_256_mod_inv_4_uv_start
orq %r10, %rsi
jne L_256_mod_inv_4_uv_start
movb $0x01, %al
jmp L_256_mod_inv_4_uv_end
L_256_mod_inv_4_uv_v:
movb $3, (%rsp,%r15,1)
incq %r15
subq %rcx, %r11
sbbq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrdq $0x01, %r14, %r13
shrq $0x01, %r14
testb $0x01, %r11b
jnz L_256_mod_inv_4_vsubu_even_end
L_256_mod_inv_4_vsubu_even_start:
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrdq $0x01, %r14, %r13
shrq $0x01, %r14
movb $0x01, (%rsp,%r15,1)
incq %r15
testb $0x01, %r11b
jz L_256_mod_inv_4_vsubu_even_start
L_256_mod_inv_4_vsubu_even_end:
cmpq $0x01, %r11
jne L_256_mod_inv_4_uv_start
movq %r12, %rsi
orq %r13, %rsi
jne L_256_mod_inv_4_uv_start
orq %r14, %rsi
jne L_256_mod_inv_4_uv_start
movb $0x00, %al
L_256_mod_inv_4_uv_end:
movq (%rdx), %rcx
movq 8(%rdx), %r8
movq 16(%rdx), %r9
movq 24(%rdx), %r10
movq $0x01, %r11
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
movb $7, (%rsp,%r15,1)
movb (%rsp), %sil
movq $0x01, %r15
cmpb $0x01, %sil
je L_256_mod_inv_4_op_div2_d
jl L_256_mod_inv_4_op_div2_b
cmpb $3, %sil
je L_256_mod_inv_4_op_d_sub_b
jl L_256_mod_inv_4_op_b_sub_d
jmp L_256_mod_inv_4_op_end
L_256_mod_inv_4_op_b_sub_d:
subq %r11, %rcx
sbbq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
jnc L_256_mod_inv_4_op_div2_b
addq (%rdx), %rcx
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
L_256_mod_inv_4_op_div2_b:
testb $0x01, %cl
movq $0x00, %rsi
jz L_256_mod_inv_4_op_div2_b_mod
addq (%rdx), %rcx
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
adcq $0x00, %rsi
L_256_mod_inv_4_op_div2_b_mod:
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrdq $0x01, %rsi, %r10
movb (%rsp,%r15,1), %sil
incq %r15
cmpb $0x01, %sil
je L_256_mod_inv_4_op_div2_d
jl L_256_mod_inv_4_op_div2_b
cmpb $3, %sil
je L_256_mod_inv_4_op_d_sub_b
jl L_256_mod_inv_4_op_b_sub_d
jmp L_256_mod_inv_4_op_end
L_256_mod_inv_4_op_d_sub_b:
subq %rcx, %r11
sbbq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
jnc L_256_mod_inv_4_op_div2_d
addq (%rdx), %r11
adcq 8(%rdx), %r12
adcq 16(%rdx), %r13
adcq 24(%rdx), %r14
L_256_mod_inv_4_op_div2_d:
testb $0x01, %r11b
movq $0x00, %rsi
jz L_256_mod_inv_4_op_div2_d_mod
addq (%rdx), %r11
adcq 8(%rdx), %r12
adcq 16(%rdx), %r13
adcq 24(%rdx), %r14
adcq $0x00, %rsi
L_256_mod_inv_4_op_div2_d_mod:
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrdq $0x01, %r14, %r13
shrdq $0x01, %rsi, %r14
movb (%rsp,%r15,1), %sil
incq %r15
cmpb $0x01, %sil
je L_256_mod_inv_4_op_div2_d
jl L_256_mod_inv_4_op_div2_b
cmpb $3, %sil
je L_256_mod_inv_4_op_d_sub_b
jl L_256_mod_inv_4_op_b_sub_d
L_256_mod_inv_4_op_end:
cmpb $0x01, %al
jne L_256_mod_inv_4_store_d
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
jmp L_256_mod_inv_4_store_end
L_256_mod_inv_4_store_d:
movq %r11, (%rdi)
movq %r12, 8(%rdi)
movq %r13, 16(%rdi)
movq %r14, 24(%rdi)
L_256_mod_inv_4_store_end:
addq $0x201, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mod_inv_4,.-sp_256_mod_inv_4
#endif /* __APPLE__ */
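/* The routine logs each (u, v) operation in the stack byte array and
 * then replays the log on the cofactor, so only one Bezout coefficient
 * is carried at a time. The underlying algorithm is the binary extended
 * Euclidean algorithm; a one-pass, word-sized C sketch (toy version,
 * name hypothetical, requires m odd and gcd(a, m) == 1):
 *
 *     #include <stdint.h>
 *
 *     static uint64_t mod_inv_c(uint64_t a, uint64_t m)
 *     {
 *         uint64_t u = a, v = m;
 *         uint64_t x1 = 1, x2 = 0;    // x1*a == u, x2*a == v (mod m)
 *         while (u != 1 && v != 1) {
 *             while ((u & 1) == 0) {  // halve u and x1 (mod m)
 *                 u >>= 1;
 *                 x1 = (x1 & 1) ? (x1 >> 1) + (m >> 1) + 1 : (x1 >> 1);
 *             }
 *             while ((v & 1) == 0) {  // halve v and x2 (mod m)
 *                 v >>= 1;
 *                 x2 = (x2 & 1) ? (x2 >> 1) + (m >> 1) + 1 : (x2 >> 1);
 *             }
 *             if (u >= v) {           // subtract smaller from larger
 *                 u -= v;
 *                 x1 = (x1 >= x2) ? x1 - x2 : x1 - x2 + m;
 *             } else {
 *                 v -= u;
 *                 x2 = (x2 >= x1) ? x2 - x1 : x2 - x1 + m;
 *             }
 *         }
 *         return (u == 1) ? x1 : x2;
 *     }
 */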
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_order:
.long 0x632551,0x1e84f3b,0x3bce6fa,0x3ffffff
.long 0x3ff0000,0x0,0x0,0x0
.long 0x272b0bf,0x2b69c5e,0x3ffffff,0x3ff
.long 0x3fffff,0x0,0x0,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_one:
.quad 0x1, 0x0
.quad 0x0, 0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_all_one:
.long 0x1,0x1,0x1,0x1
.long 0x1,0x1,0x1,0x1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_mask01111:
.long 0x0,0x1,0x1,0x1
.long 0x1,0x0,0x0,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_down_one_dword:
.long 0x1,0x2,0x3,0x4
.long 0x5,0x6,0x7,0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_neg:
.long 0x0,0x0,0x0,0x0
.long 0x80000000,0x0,0x0,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_up_one_dword:
.long 0x7,0x0,0x1,0x2
.long 0x3,0x7,0x7,0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sp256_mod_inv_avx2_4_mask26:
.long 0x3ffffff,0x3ffffff,0x3ffffff,0x3ffffff
.long 0x3ffffff,0x0,0x0,0x0
/* Non-constant time modular inversion.
*
* @param [out] r Resulting number.
* @param [in] a Number to invert.
* @param [in] m Modulus.
* @return MP_OKAY on success.
*/
#ifndef __APPLE__
.text
.globl sp_256_mod_inv_avx2_4
.type sp_256_mod_inv_avx2_4,@function
.align 16
sp_256_mod_inv_avx2_4:
#else
.section __TEXT,__text
.globl _sp_256_mod_inv_avx2_4
.p2align 4
_sp_256_mod_inv_avx2_4:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq (%rdx), %rax
movq 8(%rdx), %rcx
movq 16(%rdx), %r8
movq 24(%rdx), %r9
movq (%rsi), %r10
movq 8(%rsi), %r11
movq 16(%rsi), %r12
movq 24(%rsi), %r13
leaq L_sp256_mod_inv_avx2_4_order(%rip), %rbx
vmovupd (%rbx), %ymm6
vmovupd 32(%rbx), %ymm7
leaq L_sp256_mod_inv_avx2_4_one(%rip), %rbx
vmovupd (%rbx), %ymm8
leaq L_sp256_mod_inv_avx2_4_mask01111(%rip), %rbx
vmovupd (%rbx), %ymm9
leaq L_sp256_mod_inv_avx2_4_all_one(%rip), %rbx
vmovupd (%rbx), %ymm10
leaq L_sp256_mod_inv_avx2_4_down_one_dword(%rip), %rbx
vmovupd (%rbx), %ymm11
leaq L_sp256_mod_inv_avx2_4_neg(%rip), %rbx
vmovupd (%rbx), %ymm12
leaq L_sp256_mod_inv_avx2_4_up_one_dword(%rip), %rbx
vmovupd (%rbx), %ymm13
leaq L_sp256_mod_inv_avx2_4_mask26(%rip), %rbx
vmovupd (%rbx), %ymm14
vpxor %xmm0, %xmm0, %xmm0
vpxor %xmm1, %xmm1, %xmm1
vmovdqu %ymm8, %ymm2
vpxor %xmm3, %xmm3, %xmm3
testb $0x01, %r10b
jnz L_256_mod_inv_avx2_4_v_even_end
L_256_mod_inv_avx2_4_v_even_start:
shrdq $0x01, %r11, %r10
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrq $0x01, %r13
vptest %ymm8, %ymm2
jz L_256_mod_inv_avx2_4_v_even_shr1
vpaddd %ymm6, %ymm2, %ymm2
vpaddd %ymm7, %ymm3, %ymm3
L_256_mod_inv_avx2_4_v_even_shr1:
vpand %ymm9, %ymm2, %ymm4
vpand %ymm10, %ymm3, %ymm5
vpermd %ymm4, %ymm11, %ymm4
vpsrad $0x01, %ymm2, %ymm2
vpsrad $0x01, %ymm3, %ymm3
vpslld $25, %ymm5, %ymm5
vpslld $25, %xmm4, %xmm4
vpaddd %ymm5, %ymm2, %ymm2
vpaddd %ymm4, %ymm3, %ymm3
testb $0x01, %r10b
jz L_256_mod_inv_avx2_4_v_even_start
L_256_mod_inv_avx2_4_v_even_end:
L_256_mod_inv_avx2_4_uv_start:
cmpq %r13, %r9
jb L_256_mod_inv_avx2_4_uv_v
ja L_256_mod_inv_avx2_4_uv_u
cmpq %r12, %r8
jb L_256_mod_inv_avx2_4_uv_v
ja L_256_mod_inv_avx2_4_uv_u
cmpq %r11, %rcx
jb L_256_mod_inv_avx2_4_uv_v
ja L_256_mod_inv_avx2_4_uv_u
cmpq %r10, %rax
jb L_256_mod_inv_avx2_4_uv_v
L_256_mod_inv_avx2_4_uv_u:
subq %r10, %rax
sbbq %r11, %rcx
vpsubd %ymm2, %ymm0, %ymm0
sbbq %r12, %r8
vpsubd %ymm3, %ymm1, %ymm1
sbbq %r13, %r9
vptest %ymm12, %ymm1
jz L_256_mod_inv_avx2_4_usubv_done_neg
vpaddd %ymm6, %ymm0, %ymm0
vpaddd %ymm7, %ymm1, %ymm1
L_256_mod_inv_avx2_4_usubv_done_neg:
L_256_mod_inv_avx2_4_usubv_shr1:
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrq $0x01, %r9
vptest %ymm8, %ymm0
jz L_256_mod_inv_avx2_4_usubv_sub_shr1
vpaddd %ymm6, %ymm0, %ymm0
vpaddd %ymm7, %ymm1, %ymm1
L_256_mod_inv_avx2_4_usubv_sub_shr1:
vpand %ymm9, %ymm0, %ymm4
vpand %ymm10, %ymm1, %ymm5
vpermd %ymm4, %ymm11, %ymm4
vpsrad $0x01, %ymm0, %ymm0
vpsrad $0x01, %ymm1, %ymm1
vpslld $25, %ymm5, %ymm5
vpslld $25, %xmm4, %xmm4
vpaddd %ymm5, %ymm0, %ymm0
vpaddd %ymm4, %ymm1, %ymm1
testb $0x01, %al
jz L_256_mod_inv_avx2_4_usubv_shr1
cmpq $0x01, %rax
jne L_256_mod_inv_avx2_4_uv_start
movq %rcx, %rsi
orq %r8, %rsi
jne L_256_mod_inv_avx2_4_uv_start
orq %r9, %rsi
jne L_256_mod_inv_avx2_4_uv_start
vpextrd $0x00, %xmm0, %eax
vpextrd $0x01, %xmm0, %r8d
vpextrd $2, %xmm0, %r10d
vpextrd $3, %xmm0, %r12d
vpextrd $0x00, %xmm1, %ecx
vpextrd $0x01, %xmm1, %r9d
vpextrd $2, %xmm1, %r11d
vpextrd $3, %xmm1, %r13d
vextracti128 $0x01, %ymm0, %xmm0
vextracti128 $0x01, %ymm1, %xmm1
vpextrd $0x00, %xmm0, %r14d
vpextrd $0x00, %xmm1, %r15d
jmp L_256_mod_inv_avx2_4_store_done
L_256_mod_inv_avx2_4_uv_v:
subq %rax, %r10
sbbq %rcx, %r11
vpsubd %ymm0, %ymm2, %ymm2
sbbq %r8, %r12
vpsubd %ymm1, %ymm3, %ymm3
sbbq %r9, %r13
vptest %ymm12, %ymm3
jz L_256_mod_inv_avx2_4_vsubu_done_neg
vpaddd %ymm6, %ymm2, %ymm2
vpaddd %ymm7, %ymm3, %ymm3
L_256_mod_inv_avx2_4_vsubu_done_neg:
L_256_mod_inv_avx2_4_vsubu_shr1:
shrdq $0x01, %r11, %r10
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrq $0x01, %r13
vptest %ymm8, %ymm2
jz L_256_mod_inv_avx2_4_vsubu_sub_shr1
vpaddd %ymm6, %ymm2, %ymm2
vpaddd %ymm7, %ymm3, %ymm3
L_256_mod_inv_avx2_4_vsubu_sub_shr1:
vpand %ymm9, %ymm2, %ymm4
vpand %ymm10, %ymm3, %ymm5
vpermd %ymm4, %ymm11, %ymm4
vpsrad $0x01, %ymm2, %ymm2
vpsrad $0x01, %ymm3, %ymm3
vpslld $25, %ymm5, %ymm5
vpslld $25, %xmm4, %xmm4
vpaddd %ymm5, %ymm2, %ymm2
vpaddd %ymm4, %ymm3, %ymm3
testb $0x01, %r10b
jz L_256_mod_inv_avx2_4_vsubu_shr1
cmpq $0x01, %r10
jne L_256_mod_inv_avx2_4_uv_start
movq %r11, %rsi
orq %r12, %rsi
jne L_256_mod_inv_avx2_4_uv_start
orq %r13, %rsi
jne L_256_mod_inv_avx2_4_uv_start
vpextrd $0x00, %xmm2, %eax
vpextrd $0x01, %xmm2, %r8d
vpextrd $2, %xmm2, %r10d
vpextrd $3, %xmm2, %r12d
vpextrd $0x00, %xmm3, %ecx
vpextrd $0x01, %xmm3, %r9d
vpextrd $2, %xmm3, %r11d
vpextrd $3, %xmm3, %r13d
vextracti128 $0x01, %ymm2, %xmm2
vextracti128 $0x01, %ymm3, %xmm3
vpextrd $0x00, %xmm2, %r14d
vpextrd $0x00, %xmm3, %r15d
L_256_mod_inv_avx2_4_store_done:
movl %eax, %esi
andl $0x3ffffff, %eax
sarl $26, %esi
addl %esi, %ecx
movl %ecx, %esi
andl $0x3ffffff, %ecx
sarl $26, %esi
addl %esi, %r8d
movl %r8d, %esi
andl $0x3ffffff, %r8d
sarl $26, %esi
addl %esi, %r9d
movl %r9d, %esi
andl $0x3ffffff, %r9d
sarl $26, %esi
addl %esi, %r10d
movl %r10d, %esi
andl $0x3ffffff, %r10d
sarl $26, %esi
addl %esi, %r11d
movl %r11d, %esi
andl $0x3ffffff, %r11d
sarl $26, %esi
addl %esi, %r12d
movl %r12d, %esi
andl $0x3ffffff, %r12d
sarl $26, %esi
addl %esi, %r13d
movl %r13d, %esi
andl $0x3ffffff, %r13d
sarl $26, %esi
addl %esi, %r14d
movl %r14d, %esi
andl $0x3ffffff, %r14d
sarl $26, %esi
addl %esi, %r15d
movslq %ecx, %rcx
movslq %r9d, %r9
movslq %r11d, %r11
movslq %r13d, %r13
movslq %r15d, %r15
shlq $26, %rcx
shlq $26, %r9
shlq $26, %r11
shlq $26, %r13
shlq $26, %r15
movslq %eax, %rax
addq %rcx, %rax
movslq %r8d, %r8
adcq %r9, %r8
movslq %r10d, %r10
adcq %r11, %r10
movslq %r12d, %r12
adcq %r13, %r12
movslq %r14d, %r14
adcq %r15, %r14
jge L_256_mod_inv_avx2_4_3_no_add_order
movq $0x9cac2fc632551, %rcx
movq $0xada7179e84f3b, %r9
movq $0xfffffffbce6fa, %r11
movq $0xfffffffff, %r13
movq $0xffffffff0000, %r15
addq %rcx, %rax
addq %r9, %r8
addq %r11, %r10
addq %r13, %r12
addq %r15, %r14
movq $0xfffffffffffff, %rsi
movq %rax, %rcx
andq %rsi, %rax
sarq $52, %rcx
addq %rcx, %r8
movq %r8, %r9
andq %rsi, %r8
sarq $52, %r9
addq %r9, %r10
movq %r10, %r11
andq %rsi, %r10
sarq $52, %r11
addq %r11, %r12
movq %r12, %r13
andq %rsi, %r12
sarq $52, %r13
addq %r13, %r14
L_256_mod_inv_avx2_4_3_no_add_order:
movq %r8, %rcx
movq %r10, %r9
movq %r12, %r11
shlq $52, %rcx
sarq $12, %r8
shlq $40, %r9
sarq $24, %r10
shlq $28, %r11
sarq $36, %r12
shlq $16, %r14
addq %rcx, %rax
adcq %r9, %r8
adcq %r11, %r10
adcq %r14, %r12
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq %r10, 16(%rdi)
movq %r12, 24(%rdi)
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_256_mod_inv_avx2_4,.-sp_256_mod_inv_avx2_4
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WOLFSSL_SP_NO_256 */
#ifdef WOLFSSL_SP_384
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_mul_6
.type sp_384_mul_6,@function
.align 16
sp_384_mul_6:
#else
.section __TEXT,__text
.globl _sp_384_mul_6
.p2align 4
_sp_384_mul_6:
#endif /* __APPLE__ */
movq %rdx, %rcx
subq $48, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rsp)
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 48(%rdi)
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 56(%rdi)
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 64(%rdi)
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 72(%rdi)
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
addq $48, %rsp
repz retq
#ifndef __APPLE__
.size sp_384_mul_6,.-sp_384_mul_6
#endif /* __APPLE__ */
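/* A column-oriented C sketch of the schoolbook product above
 * (illustrative; names hypothetical). The asm buffers the low columns
 * on the stack so r may alias a or b; this sketch assumes no aliasing:
 *
 *     #include <stdint.h>
 *
 *     static void mul_6_c(uint64_t r[12],
 *                         const uint64_t a[6], const uint64_t b[6])
 *     {
 *         unsigned __int128 acc = 0;  // column accumulator (r8:r9:r10)
 *         uint64_t extra = 0;         // overflow above 128 bits
 *         for (int k = 0; k < 11; k++) {
 *             int lo = (k < 6) ? 0 : k - 5;
 *             int hi = (k < 6) ? k : 5;
 *             for (int i = lo; i <= hi; i++) {
 *                 unsigned __int128 p = (unsigned __int128)a[i] * b[k - i];
 *                 acc += p;
 *                 extra += (acc < p); // carry into the third word
 *             }
 *             r[k] = (uint64_t)acc;
 *             acc = (acc >> 64) + ((unsigned __int128)extra << 64);
 *             extra = 0;
 *         }
 *         r[11] = (uint64_t)acc;
 *     }
 */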
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
#ifndef __APPLE__
.text
.globl sp_384_mul_avx2_6
.type sp_384_mul_avx2_6,@function
.align 16
sp_384_mul_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_mul_avx2_6
.p2align 4
_sp_384_mul_avx2_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
movq %rdx, %rax
subq $40, %rsp
xorq %rbx, %rbx
movq (%rsi), %rdx
# A[0] * B[0]
mulxq (%rax), %r9, %r10
# A[0] * B[1]
mulxq 8(%rax), %rcx, %r11
adcxq %rcx, %r10
# A[0] * B[2]
mulxq 16(%rax), %rcx, %r12
adcxq %rcx, %r11
# A[0] * B[3]
mulxq 24(%rax), %rcx, %r13
adcxq %rcx, %r12
# A[0] * B[4]
mulxq 32(%rax), %rcx, %r14
adcxq %rcx, %r13
# A[0] * B[5]
mulxq 40(%rax), %rcx, %r15
adcxq %rcx, %r14
adcxq %rbx, %r15
movq %r9, (%rsp)
movq $0x00, %r9
adcxq %rbx, %r9
xorq %rbx, %rbx
movq 8(%rsi), %rdx
# A[1] * B[0]
mulxq (%rax), %rcx, %r8
adcxq %rcx, %r10
adoxq %r8, %r11
# A[1] * B[1]
mulxq 8(%rax), %rcx, %r8
adcxq %rcx, %r11
adoxq %r8, %r12
# A[1] * B[2]
mulxq 16(%rax), %rcx, %r8
adcxq %rcx, %r12
adoxq %r8, %r13
# A[1] * B[3]
mulxq 24(%rax), %rcx, %r8
adcxq %rcx, %r13
adoxq %r8, %r14
# A[1] * B[4]
mulxq 32(%rax), %rcx, %r8
adcxq %rcx, %r14
adoxq %r8, %r15
# A[1] * B[5]
mulxq 40(%rax), %rcx, %r8
adcxq %rcx, %r15
adoxq %r8, %r9
adcxq %rbx, %r9
movq %r10, 8(%rsp)
movq $0x00, %r10
adcxq %rbx, %r10
adoxq %rbx, %r10
xorq %rbx, %rbx
movq 16(%rsi), %rdx
# A[2] * B[0]
mulxq (%rax), %rcx, %r8
adcxq %rcx, %r11
adoxq %r8, %r12
# A[2] * B[1]
mulxq 8(%rax), %rcx, %r8
adcxq %rcx, %r12
adoxq %r8, %r13
# A[2] * B[2]
mulxq 16(%rax), %rcx, %r8
adcxq %rcx, %r13
adoxq %r8, %r14
# A[2] * B[3]
mulxq 24(%rax), %rcx, %r8
adcxq %rcx, %r14
adoxq %r8, %r15
# A[2] * B[4]
mulxq 32(%rax), %rcx, %r8
adcxq %rcx, %r15
adoxq %r8, %r9
# A[2] * B[5]
mulxq 40(%rax), %rcx, %r8
adcxq %rcx, %r9
adoxq %r8, %r10
adcxq %rbx, %r10
movq %r11, 16(%rsp)
movq $0x00, %r11
adcxq %rbx, %r11
adoxq %rbx, %r11
xorq %rbx, %rbx
movq 24(%rsi), %rdx
# A[3] * B[0]
mulxq (%rax), %rcx, %r8
adcxq %rcx, %r12
adoxq %r8, %r13
# A[3] * B[1]
mulxq 8(%rax), %rcx, %r8
adcxq %rcx, %r13
adoxq %r8, %r14
# A[3] * B[2]
mulxq 16(%rax), %rcx, %r8
adcxq %rcx, %r14
adoxq %r8, %r15
# A[3] * B[3]
mulxq 24(%rax), %rcx, %r8
adcxq %rcx, %r15
adoxq %r8, %r9
# A[3] * B[4]
mulxq 32(%rax), %rcx, %r8
adcxq %rcx, %r9
adoxq %r8, %r10
# A[3] * B[5]
mulxq 40(%rax), %rcx, %r8
adcxq %rcx, %r10
adoxq %r8, %r11
adcxq %rbx, %r11
movq %r12, 24(%rsp)
movq $0x00, %r12
adcxq %rbx, %r12
adoxq %rbx, %r12
xorq %rbx, %rbx
movq 32(%rsi), %rdx
# A[4] * B[0]
mulxq (%rax), %rcx, %r8
adcxq %rcx, %r13
adoxq %r8, %r14
# A[4] * B[1]
mulxq 8(%rax), %rcx, %r8
adcxq %rcx, %r14
adoxq %r8, %r15
# A[4] * B[2]
mulxq 16(%rax), %rcx, %r8
adcxq %rcx, %r15
adoxq %r8, %r9
# A[4] * B[3]
mulxq 24(%rax), %rcx, %r8
adcxq %rcx, %r9
adoxq %r8, %r10
# A[4] * B[4]
mulxq 32(%rax), %rcx, %r8
adcxq %rcx, %r10
adoxq %r8, %r11
# A[4] * B[5]
mulxq 40(%rax), %rcx, %r8
adcxq %rcx, %r11
adoxq %r8, %r12
adcxq %rbx, %r12
movq %r13, 32(%rsp)
movq 40(%rsi), %rdx
# A[5] * B[0]
mulxq (%rax), %rcx, %r8
adcxq %rcx, %r14
adoxq %r8, %r15
# A[5] * B[1]
mulxq 8(%rax), %rcx, %r8
adcxq %rcx, %r15
adoxq %r8, %r9
# A[5] * B[2]
mulxq 16(%rax), %rcx, %r8
adcxq %rcx, %r9
adoxq %r8, %r10
# A[5] * B[3]
mulxq 24(%rax), %rcx, %r8
adcxq %rcx, %r10
adoxq %r8, %r11
# A[5] * B[4]
mulxq 32(%rax), %rcx, %r8
adcxq %rcx, %r11
adoxq %r8, %r12
# A[5] * B[5]
mulxq 40(%rax), %rcx, %r13
adcxq %rcx, %r12
adoxq %rbx, %r13
adcxq %rbx, %r13
movq %r14, 40(%rdi)
movq %r15, 48(%rdi)
movq %r9, 56(%rdi)
movq %r10, 64(%rdi)
movq %r11, 72(%rdi)
movq %r12, 80(%rdi)
movq %r13, 88(%rdi)
movq (%rsp), %r9
movq 8(%rsp), %r10
movq 16(%rsp), %r11
movq 24(%rsp), %r12
movq 32(%rsp), %r13
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %r13, 32(%rdi)
addq $40, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mul_avx2_6,.-sp_384_mul_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_sqr_6
.type sp_384_sqr_6,@function
.align 16
sp_384_sqr_6:
#else
.section __TEXT,__text
.globl _sp_384_sqr_6
.p2align 4
_sp_384_sqr_6:
#endif /* __APPLE__ */
pushq %r12
subq $48, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rsp)
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 40(%rsp)
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 48(%rdi)
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 56(%rdi)
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 64(%rdi)
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 72(%rdi)
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
addq $48, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_sqr_6,.-sp_384_sqr_6
#endif /* __APPLE__ */
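/* Squaring adds each cross product a[i]*a[j] (i < j) twice and each
 * diagonal square once, which is what the repeated addq/adcq pairs
 * above implement. C sketch (names hypothetical; r must not alias a):
 *
 *     #include <stdint.h>
 *     #include <string.h>
 *
 *     static void add_at(uint64_t r[12], int pos, unsigned __int128 v)
 *     {
 *         while (v != 0 && pos < 12) {    // add v at word position pos
 *             unsigned __int128 s = (unsigned __int128)r[pos] + (uint64_t)v;
 *             r[pos++] = (uint64_t)s;
 *             v = (v >> 64) + (s >> 64);
 *         }
 *     }
 *
 *     static void sqr_6_c(uint64_t r[12], const uint64_t a[6])
 *     {
 *         memset(r, 0, 12 * sizeof(uint64_t));
 *         for (int i = 0; i < 6; i++)     // cross products, once each
 *             for (int j = i + 1; j < 6; j++)
 *                 add_at(r, i + j, (unsigned __int128)a[i] * a[j]);
 *         uint64_t carry = 0;             // double the cross products
 *         for (int k = 0; k < 12; k++) {
 *             uint64_t w = r[k];
 *             r[k] = (w << 1) | carry;
 *             carry = w >> 63;
 *         }
 *         for (int i = 0; i < 6; i++)     // add the diagonal squares
 *             add_at(r, 2 * i, (unsigned __int128)a[i] * a[i]);
 *     }
 */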
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* r Result of squaring.
 * a Number to square.
*/
#ifndef __APPLE__
.text
.globl sp_384_sqr_avx2_6
.type sp_384_sqr_avx2_6,@function
.align 16
sp_384_sqr_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_sqr_avx2_6
.p2align 4
_sp_384_sqr_avx2_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
push %rdi
xorq %rdi, %rdi
movq (%rsi), %rdx
movq 8(%rsi), %r15
movq 16(%rsi), %rbx
movq 24(%rsi), %rbp
# Diagonal 0
# A[1] * A[0]
mulxq 8(%rsi), %r8, %r9
# A[2] * A[0]
mulxq 16(%rsi), %rax, %r10
adcxq %rax, %r9
# A[3] * A[0]
mulxq 24(%rsi), %rax, %r11
adcxq %rax, %r10
# A[4] * A[0]
mulxq 32(%rsi), %rax, %r12
adcxq %rax, %r11
# A[5] * A[0]
mulxq 40(%rsi), %rax, %r13
adcxq %rax, %r12
adcxq %rdi, %r13
# Diagonal 1
movq %r15, %rdx
# A[2] * A[1]
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# A[5] * A[1]
mulxq 40(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %rdi, %r14
movq %rbx, %rdx
# A[5] * A[2]
mulxq 40(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %rdi, %r15
adcxq %rdi, %r15
adcxq %rdi, %rbx
# Diagonal 2
# A[3] * A[2]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# A[4] * A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %rbp, %rdx
# A[4] * A[3]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[5] * A[3]
mulxq 40(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rdi, %rbx
movq 32(%rsi), %rdx
# A[5] * A[4]
mulxq 40(%rsi), %rax, %rbp
adcxq %rax, %rbx
adoxq %rdi, %rbp
adcxq %rdi, %rbp
adcxq %rdi, %rdi
# Doubling previous result as we add in square words results
# A[0] * A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
pop %rdx
movq %rax, (%rdx)
adoxq %r8, %r8
push %rdx
adcxq %rcx, %r8
# A[1] * A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r9, %r9
adcxq %rax, %r9
adoxq %r10, %r10
adcxq %rcx, %r10
# A[2] * A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r11, %r11
adcxq %rax, %r11
adoxq %r12, %r12
adcxq %rcx, %r12
# A[3] * A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r13, %r13
adcxq %rax, %r13
adoxq %r14, %r14
adcxq %rcx, %r14
# A[4] * A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r15, %r15
adcxq %rax, %r15
adoxq %rbx, %rbx
adcxq %rcx, %rbx
# A[5] * A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %rbp, %rbp
adcxq %rax, %rbp
adcxq %rdi, %rcx
movq $0x00, %rax
adoxq %rax, %rcx
pop %rdi
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq %r11, 32(%rdi)
movq %r12, 40(%rdi)
movq %r13, 48(%rdi)
movq %r14, 56(%rdi)
movq %r15, 64(%rdi)
movq %rbx, 72(%rdi)
movq %rbp, 80(%rdi)
movq %rcx, 88(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_sqr_avx2_6,.-sp_384_sqr_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_add_6
.type sp_384_add_6,@function
.align 16
sp_384_add_6:
#else
.section __TEXT,__text
.globl _sp_384_add_6
.p2align 4
_sp_384_add_6:
#endif /* __APPLE__ */
pushq %r12
xorq %rax, %rax
movq (%rsi), %rcx
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
movq 32(%rsi), %r11
movq 40(%rsi), %r12
addq (%rdx), %rcx
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
adcq 32(%rdx), %r11
adcq 40(%rdx), %r12
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq %r11, 32(%rdi)
movq %r12, 40(%rdi)
adcq $0x00, %rax
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_add_6,.-sp_384_add_6
#endif /* __APPLE__ */
/* Sub b from a into r. (r = a - b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_sub_6
.type sp_384_sub_6,@function
.align 16
sp_384_sub_6:
#else
.section __TEXT,__text
.globl _sp_384_sub_6
.p2align 4
_sp_384_sub_6:
#endif /* __APPLE__ */
pushq %r12
xorq %rax, %rax
movq (%rsi), %rcx
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
movq 32(%rsi), %r11
movq 40(%rsi), %r12
subq (%rdx), %rcx
sbbq 8(%rdx), %r8
sbbq 16(%rdx), %r9
sbbq 24(%rdx), %r10
sbbq 32(%rdx), %r11
sbbq 40(%rdx), %r12
movq %rcx, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq %r11, 32(%rdi)
movq %r12, 40(%rdi)
sbbq %rax, %rax
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_sub_6,.-sp_384_sub_6
#endif /* __APPLE__ */
/* Conditionally copy a into r using the mask m.
* m is -1 to copy and 0 when not.
*
* r A single precision number to copy over.
* a A single precision number to copy.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_384_cond_copy_6
.type sp_384_cond_copy_6,@function
.align 16
sp_384_cond_copy_6:
#else
.section __TEXT,__text
.globl _sp_384_cond_copy_6
.p2align 4
_sp_384_cond_copy_6:
#endif /* __APPLE__ */
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
movq 32(%rdi), %r10
movq 40(%rdi), %r11
xorq (%rsi), %rax
xorq 8(%rsi), %rcx
xorq 16(%rsi), %r8
xorq 24(%rsi), %r9
xorq 32(%rsi), %r10
xorq 40(%rsi), %r11
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
andq %rdx, %r10
andq %rdx, %r11
xorq %rax, (%rdi)
xorq %rcx, 8(%rdi)
xorq %r8, 16(%rdi)
xorq %r9, 24(%rdi)
xorq %r10, 32(%rdi)
xorq %r11, 40(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_cond_copy_6,.-sp_384_cond_copy_6
#endif /* __APPLE__ */
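/* The branch-free select above is the classic xor/and/xor mask trick;
 * in C (sketch; name hypothetical):
 *
 *     #include <stdint.h>
 *
 *     static void cond_copy_6_c(uint64_t r[6], const uint64_t a[6],
 *                               uint64_t m)  // m is 0 or all ones
 *     {
 *         for (int i = 0; i < 6; i++)
 *             r[i] ^= (r[i] ^ a[i]) & m;    // r = m ? a : r
 *     }
 */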
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
#ifndef __APPLE__
.text
.globl sp_384_cond_sub_6
.type sp_384_cond_sub_6,@function
.align 16
sp_384_cond_sub_6:
#else
.section __TEXT,__text
.globl _sp_384_cond_sub_6
.p2align 4
_sp_384_cond_sub_6:
#endif /* __APPLE__ */
subq $48, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
sbbq %rax, %rax
addq $48, %rsp
repz retq
#ifndef __APPLE__
.size sp_384_cond_sub_6,.-sp_384_cond_sub_6
#endif /* __APPLE__ */
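/* In C terms the masked subtract above is (sketch; name hypothetical):
 *
 *     #include <stdint.h>
 *
 *     static uint64_t cond_sub_6_c(uint64_t r[6], const uint64_t a[6],
 *                                  const uint64_t b[6], uint64_t m)
 *     {
 *         __int128 t = 0;
 *         for (int i = 0; i < 6; i++) {
 *             t += (__int128)a[i] - (b[i] & m); // subtract only if m set
 *             r[i] = (uint64_t)t;
 *             t >>= 64;
 *         }
 *         return (uint64_t)t;   // 0, or all ones on borrow
 *     }
 */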
/* Reduce the number back to 384 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64.
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_reduce_6
.type sp_384_mont_reduce_6,@function
.align 16
sp_384_mont_reduce_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_reduce_6
.p2align 4
_sp_384_mont_reduce_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq (%rdi), %r11
movq 8(%rdi), %r12
movq 16(%rdi), %r13
movq 24(%rdi), %r14
movq 32(%rdi), %r15
movq 40(%rdi), %rsi
xorq %r10, %r10
# a[0-7] += m[0-5] * mu[0..1] = m[0-5] * (a[0..1] * mp)
movq 48(%rdi), %rbx
movq 56(%rdi), %rbp
movq %r11, %rdx
movq %r12, %rax
shldq $32, %rdx, %rax
shlq $32, %rdx
addq %r11, %rdx
adcq %r12, %rax
addq %r11, %rax
movq %rdx, %rcx
movq %rax, %r8
movq %rax, %r9
shldq $32, %rcx, %r8
shlq $32, %rcx
shrq $32, %r9
addq %rcx, %r11
adcq %r8, %r12
adcq %r9, %r13
adcq $0x00, %r14
adcq $0x00, %r15
adcq $0x00, %rsi
adcq %rdx, %rbx
adcq %rax, %rbp
adcq $0x00, %r10
addq %rax, %rcx
adcq %rdx, %r8
adcq %rax, %r9
movq $0x00, %rax
adcq $0x00, %rax
subq %r8, %r13
sbbq %r9, %r14
sbbq %rax, %r15
sbbq $0x00, %rsi
sbbq $0x00, %rbx
sbbq $0x00, %rbp
sbbq $0x00, %r10
# a[2-9] += m[0-5] * mu[0..1] = m[0-5] * (a[2..3] * mp)
movq 64(%rdi), %r11
movq 72(%rdi), %r12
movq %r13, %rdx
movq %r14, %rax
shldq $32, %rdx, %rax
shlq $32, %rdx
addq %r13, %rdx
adcq %r14, %rax
addq %r13, %rax
movq %rdx, %rcx
movq %rax, %r8
movq %rax, %r9
shldq $32, %rcx, %r8
shlq $32, %rcx
shrq $32, %r9
addq %r10, %r11
adcq $0x00, %r12
movq $0x00, %r10
adcq $0x00, %r10
addq %rcx, %r13
adcq %r8, %r14
adcq %r9, %r15
adcq $0x00, %rsi
adcq $0x00, %rbx
adcq $0x00, %rbp
adcq %rdx, %r11
adcq %rax, %r12
adcq $0x00, %r10
addq %rax, %rcx
adcq %rdx, %r8
adcq %rax, %r9
movq $0x00, %rax
adcq $0x00, %rax
subq %r8, %r15
sbbq %r9, %rsi
sbbq %rax, %rbx
sbbq $0x00, %rbp
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r10
# a[4-11] += m[0-5] * mu[0..1] = m[0-5] * (a[4..5] * mp)
movq 80(%rdi), %r13
movq 88(%rdi), %r14
movq %r15, %rdx
movq %rsi, %rax
shldq $32, %rdx, %rax
shlq $32, %rdx
addq %r15, %rdx
adcq %rsi, %rax
addq %r15, %rax
movq %rdx, %rcx
movq %rax, %r8
movq %rax, %r9
shldq $32, %rcx, %r8
shlq $32, %rcx
shrq $32, %r9
addq %r10, %r13
adcq $0x00, %r14
movq $0x00, %r10
adcq $0x00, %r10
addq %rcx, %r15
adcq %r8, %rsi
adcq %r9, %rbx
adcq $0x00, %rbp
adcq $0x00, %r11
adcq $0x00, %r12
adcq %rdx, %r13
adcq %rax, %r14
adcq $0x00, %r10
addq %rax, %rcx
adcq %rdx, %r8
adcq %rax, %r9
movq $0x00, %rax
adcq $0x00, %rax
subq %r8, %rbx
sbbq %r9, %rbp
sbbq %rax, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
sbbq $0x00, %r14
sbbq $0x00, %r10
# Subtract mod if carry
negq %r10
movq $0xfffffffffffffffe, %r9
movl %r10d, %ecx
movq %r10, %r8
andq %r10, %r9
shlq $32, %r8
subq %rcx, %rbx
sbbq %r8, %rbp
sbbq %r9, %r11
sbbq %r10, %r12
sbbq %r10, %r13
sbbq %r10, %r14
movq %rbx, (%rdi)
movq %rbp, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %r13, 32(%rdi)
movq %r14, 40(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_reduce_6,.-sp_384_mont_reduce_6
#endif /* __APPLE__ */
/* Reduce the number back to 384 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^64.
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_reduce_order_6
.type sp_384_mont_reduce_order_6,@function
.align 16
sp_384_mont_reduce_order_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_reduce_order_6
.p2align 4
_sp_384_mont_reduce_order_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 6
movq $6, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_384_mont_reduce_order_6_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 40(%rdi)
adcq %rdx, 48(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_384_mont_reduce_order_6_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
subq $48, %rdi
#ifndef __APPLE__
callq sp_384_cond_sub_6@plt
#else
callq _sp_384_cond_sub_6
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_reduce_order_6,.-sp_384_mont_reduce_order_6
#endif /* __APPLE__ */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
* return -ve, 0 or +ve if a is less than, equal to or greater than b
* respectively.
*/
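/* Reference sketch (illustrative only): a branch-mask C model of the
 * compare below.  The assembly realises the same updates with cmov so no
 * branch ever depends on the data; here the ternaries are expected to
 * compile to cmov, though C gives no such guarantee.
 *
 *   #include <stdint.h>
 *
 *   static int64_t cmp_6(const uint64_t a[6], const uint64_t b[6])
 *   {
 *       int64_t  r    = -1;                 // provisional result (rax)
 *       uint64_t mask = (uint64_t)-1;       // live until a difference (rdx)
 *       for (int i = 5; i >= 0; i--) {
 *           uint64_t x = a[i] & mask;       // masked-out words compare equal
 *           uint64_t y = b[i] & mask;
 *           r = (x > y) ?  1 : r;           // asm: cmova
 *           r = (x < y) ? -1 : r;           // asm: cmovc
 *           mask &= 0 - (uint64_t)(x == y); // asm: cmovnz clears the mask
 *       }
 *       return r ^ (int64_t)mask;           // equal => r = mask = -1 => 0
 *   }
 */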
#ifndef __APPLE__
.text
.globl sp_384_cmp_6
.type sp_384_cmp_6,@function
.align 16
sp_384_cmp_6:
#else
.section __TEXT,__text
.globl _sp_384_cmp_6
.p2align 4
_sp_384_cmp_6:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_384_cmp_6,.-sp_384_cmp_6
#endif /* __APPLE__ */
/* Add two Montgomery form numbers (r = a + b % m).
*
* r Result of addition.
* a First number to add in Montgomery form.
* b Second number to add in Montgomery form.
* m Modulus (prime).
*/
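/* Reference sketch (illustrative only): the add below reduces by
 * subtracting the P-384 prime under a mask derived from the carry, so no
 * branch depends on the data.  The masked constants in the assembly are
 * the non-trivial words of p384; the generated code repeats the masked
 * subtraction to fold in a possible second reduction, which this
 * single-pass sketch omits.
 *
 *   #include <stdint.h>
 *
 *   static const uint64_t p384[6] = {
 *       0x00000000ffffffffULL, 0xffffffff00000000ULL,
 *       0xfffffffffffffffeULL, 0xffffffffffffffffULL,
 *       0xffffffffffffffffULL, 0xffffffffffffffffULL
 *   };
 *
 *   static void mont_add_6(uint64_t r[6], const uint64_t a[6],
 *                          const uint64_t b[6])
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 6; i++) {       // r = a + b with carry
 *           t += (unsigned __int128)a[i] + b[i];
 *           r[i] = (uint64_t)t;
 *           t >>= 64;
 *       }
 *       uint64_t mask = 0 - (uint64_t)t;    // all ones when a + b >= 2^384
 *       t = 0;
 *       for (int i = 0; i < 6; i++) {       // r -= p384 & mask, branch free
 *           t = (unsigned __int128)r[i] - (p384[i] & mask) - t;
 *           r[i] = (uint64_t)t;
 *           t = (t >> 64) & 1;              // borrow
 *       }
 *   }
 */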
#ifndef __APPLE__
.text
.globl sp_384_mont_add_6
.type sp_384_mont_add_6,@function
.align 16
sp_384_mont_add_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_add_6
.p2align 4
_sp_384_mont_add_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
addq (%rdx), %rax
adcq 8(%rdx), %rcx
movq $0xffffffff00000000, %r13
adcq 16(%rdx), %r8
movq $0xfffffffffffffffe, %r14
adcq 24(%rdx), %r9
adcq 32(%rdx), %r10
adcq 40(%rdx), %r11
sbbq %rsi, %rsi
movl %esi, %r12d
andq %rsi, %r13
andq %rsi, %r14
subq %r12, %rax
sbbq %r13, %rcx
sbbq %r14, %r8
sbbq %rsi, %r9
sbbq %rsi, %r10
sbbq %rsi, %r11
adcq $0x00, %rsi
andq %rsi, %r12
andq %rsi, %r13
andq %rsi, %r14
subq %r12, %rax
sbbq %r13, %rcx
movq %rax, (%rdi)
sbbq %r14, %r8
movq %rcx, 8(%rdi)
sbbq %rsi, %r9
movq %r8, 16(%rdi)
sbbq %rsi, %r10
movq %r9, 24(%rdi)
sbbq %rsi, %r11
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_add_6,.-sp_384_mont_add_6
#endif /* __APPLE__ */
/* Double a Montgomery form number (r = a + a % m).
*
* r Result of doubling.
* a Number to double in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_dbl_6
.type sp_384_mont_dbl_6,@function
.align 16
sp_384_mont_dbl_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_dbl_6
.p2align 4
_sp_384_mont_dbl_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
addq %rdx, %rdx
adcq %rax, %rax
movq $0xffffffff00000000, %r12
adcq %rcx, %rcx
movq $0xfffffffffffffffe, %r13
adcq %r8, %r8
adcq %r9, %r9
movq %r10, %r14
adcq %r10, %r10
sarq $63, %r14
movl %r14d, %r11d
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
sbbq %r13, %rcx
sbbq %r14, %r8
sbbq %r14, %r9
sbbq %r14, %r10
adcq $0x00, %r14
andq %r14, %r11
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
movq %rdx, (%rdi)
sbbq %r13, %rcx
movq %rax, 8(%rdi)
sbbq %r14, %r8
movq %rcx, 16(%rdi)
sbbq %r14, %r9
movq %r8, 24(%rdi)
sbbq %r14, %r10
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_dbl_6,.-sp_384_mont_dbl_6
#endif /* __APPLE__ */
/* Triple a Montgomery form number (r = a + a + a % m).
 *
 * r Result of tripling.
 * a Number to triple in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_tpl_6
.type sp_384_mont_tpl_6,@function
.align 16
sp_384_mont_tpl_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_tpl_6
.p2align 4
_sp_384_mont_tpl_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
addq %rdx, %rdx
adcq %rax, %rax
movq $0xffffffff00000000, %r12
adcq %rcx, %rcx
movq $0xfffffffffffffffe, %r13
adcq %r8, %r8
adcq %r9, %r9
adcq %r10, %r10
sbbq %r14, %r14
movl %r14d, %r11d
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
sbbq %r13, %rcx
sbbq %r14, %r8
sbbq %r14, %r9
sbbq %r14, %r10
adcq $0x00, %r14
andq %r14, %r11
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
movq %rdx, (%rdi)
sbbq %r13, %rcx
sbbq %r14, %r8
sbbq %r14, %r9
sbbq %r14, %r10
addq (%rsi), %rdx
adcq 8(%rsi), %rax
movq $0xffffffff00000000, %r12
adcq 16(%rsi), %rcx
movq $0xfffffffffffffffe, %r13
adcq 24(%rsi), %r8
adcq 32(%rsi), %r9
adcq 40(%rsi), %r10
sbbq %r14, %r14
movl %r14d, %r11d
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
sbbq %r13, %rcx
sbbq %r14, %r8
sbbq %r14, %r9
sbbq %r14, %r10
adcq $0x00, %r14
andq %r14, %r11
andq %r14, %r12
andq %r14, %r13
subq %r11, %rdx
sbbq %r12, %rax
movq %rdx, (%rdi)
sbbq %r13, %rcx
movq %rax, 8(%rdi)
sbbq %r14, %r8
movq %rcx, 16(%rdi)
sbbq %r14, %r9
movq %r8, 24(%rdi)
sbbq %r14, %r10
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_tpl_6,.-sp_384_mont_tpl_6
#endif /* __APPLE__ */
/* Subtract two Montgomery form numbers (r = a - b % m).
*
 * r Result of subtraction.
* a Number to subtract from in Montgomery form.
* b Number to subtract with in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_sub_6
.type sp_384_mont_sub_6,@function
.align 16
sp_384_mont_sub_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_sub_6
.p2align 4
_sp_384_mont_sub_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
subq (%rdx), %rax
sbbq 8(%rdx), %rcx
movq $0xffffffff00000000, %r13
sbbq 16(%rdx), %r8
movq $0xfffffffffffffffe, %r14
sbbq 24(%rdx), %r9
sbbq 32(%rdx), %r10
sbbq 40(%rdx), %r11
sbbq %rsi, %rsi
movl %esi, %r12d
andq %rsi, %r13
andq %rsi, %r14
addq %r12, %rax
adcq %r13, %rcx
adcq %r14, %r8
adcq %rsi, %r9
adcq %rsi, %r10
adcq %rsi, %r11
adcq $0x00, %rsi
andq %rsi, %r12
andq %rsi, %r13
andq %rsi, %r14
addq %r12, %rax
adcq %r13, %rcx
movq %rax, (%rdi)
adcq %r14, %r8
movq %rcx, 8(%rdi)
adcq %rsi, %r9
movq %r8, 16(%rdi)
adcq %rsi, %r10
movq %r9, 24(%rdi)
adcq %rsi, %r11
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_sub_6,.-sp_384_mont_sub_6
#endif /* __APPLE__ */
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
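/* Reference sketch (illustrative only): when a is odd the (odd) modulus
 * is added first so the sum is even, then everything shifts right one
 * bit with the addition's carry re-entering at the top.
 *
 *   #include <stdint.h>
 *
 *   static void mont_div2_6(uint64_t r[6], const uint64_t a[6],
 *                           const uint64_t m[6])
 *   {
 *       uint64_t mask = 0 - (a[0] & 1);     // all ones when a is odd
 *       uint64_t s[6], top;
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 6; i++) {       // s = a + (m & mask)
 *           t += (unsigned __int128)a[i] + (m[i] & mask);
 *           s[i] = (uint64_t)t;
 *           t >>= 64;
 *       }
 *       top = (uint64_t)t;                  // carry out of the top word
 *       for (int i = 0; i < 5; i++)         // r = s >> 1, word by word
 *           r[i] = (s[i] >> 1) | (s[i + 1] << 63);
 *       r[5] = (s[5] >> 1) | (top << 63);
 *   }
 */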
#ifndef __APPLE__
.text
.globl sp_384_mont_div2_6
.type sp_384_mont_div2_6,@function
.align 16
sp_384_mont_div2_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_div2_6
.p2align 4
_sp_384_mont_div2_6:
#endif /* __APPLE__ */
subq $48, %rsp
movq (%rsi), %r11
xorq %r10, %r10
movq %r11, %rax
andq $0x01, %r11
negq %r11
movq (%rdx), %r8
andq %r11, %r8
movq %r8, (%rsp)
movq 8(%rdx), %r8
andq %r11, %r8
movq %r8, 8(%rsp)
movq 16(%rdx), %r8
andq %r11, %r8
movq %r8, 16(%rsp)
movq 24(%rdx), %r8
andq %r11, %r8
movq %r8, 24(%rsp)
movq 32(%rdx), %r8
andq %r11, %r8
movq %r8, 32(%rsp)
movq 40(%rdx), %r8
andq %r11, %r8
movq %r8, 40(%rsp)
addq %rax, (%rsp)
movq 8(%rsi), %rax
adcq %rax, 8(%rsp)
movq 16(%rsi), %rax
adcq %rax, 16(%rsp)
movq 24(%rsi), %rax
adcq %rax, 24(%rsp)
movq 32(%rsi), %rax
adcq %rax, 32(%rsp)
movq 40(%rsi), %rax
adcq %rax, 40(%rsp)
adcq $0x00, %r10
movq (%rsp), %rax
movq 8(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, (%rdi)
movq 16(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 8(%rdi)
movq 24(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 16(%rdi)
movq 32(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 24(%rdi)
movq 40(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 32(%rdi)
shrdq $0x01, %r10, %rcx
movq %rcx, 40(%rdi)
addq $48, %rsp
repz retq
#ifndef __APPLE__
.size sp_384_mont_div2_6,.-sp_384_mont_div2_6
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible point that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of point to retrieve.
*/
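/* Reference sketch (illustrative only): every table entry is read and
 * ANDed with a mask that is all ones only at the wanted index, so the
 * memory access pattern is independent of idx.  ENTRY_WORDS and the flat
 * entry layout are placeholders, not the real point structure; entry 0
 * is never selected, which is why the scan starts at 1.
 *
 *   #include <stdint.h>
 *
 *   #define ENTRY_WORDS 6
 *   static void get_entry(uint64_t r[ENTRY_WORDS], const uint64_t* table,
 *                         int entries, int idx)
 *   {
 *       for (int j = 0; j < ENTRY_WORDS; j++)
 *           r[j] = 0;
 *       for (int i = 1; i < entries; i++) {
 *           uint64_t mask = 0 - (uint64_t)(i == idx);  // all ones iff hit
 *           for (int j = 0; j < ENTRY_WORDS; j++)
 *               r[j] |= table[i * ENTRY_WORDS + j] & mask;
 *       }
 *   }
 */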
#ifndef __APPLE__
.text
.globl sp_384_get_point_33_6
.type sp_384_get_point_33_6,@function
.align 16
sp_384_get_point_33_6:
#else
.section __TEXT,__text
.globl _sp_384_get_point_33_6
.p2align 4
_sp_384_get_point_33_6:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm13
addq $0x128, %rsi
movd %eax, %xmm15
movq $32, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
movdqa %xmm15, %xmm14
L_384_get_point_33_6_start_1:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
movdqu (%rsi), %xmm6
movdqu 16(%rsi), %xmm7
movdqu 32(%rsi), %xmm8
movdqu 96(%rsi), %xmm9
movdqu 112(%rsi), %xmm10
movdqu 128(%rsi), %xmm11
addq $0x128, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
decq %rax
jnz L_384_get_point_33_6_start_1
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 96(%rdi)
movdqu %xmm4, 112(%rdi)
movdqu %xmm5, 128(%rdi)
movq $0x01, %rax
movd %edx, %xmm13
subq $0x2500, %rsi
movd %eax, %xmm15
movq $32, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
movdqa %xmm15, %xmm14
L_384_get_point_33_6_start_2:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
movdqu 192(%rsi), %xmm6
movdqu 208(%rsi), %xmm7
movdqu 224(%rsi), %xmm8
addq $0x128, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
decq %rax
jnz L_384_get_point_33_6_start_2
movdqu %xmm0, 192(%rdi)
movdqu %xmm1, 208(%rdi)
movdqu %xmm2, 224(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_point_33_6,.-sp_384_get_point_33_6
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible point that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of point to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_384_get_point_33_avx2_6
.type sp_384_get_point_33_avx2_6,@function
.align 16
sp_384_get_point_33_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_get_point_33_avx2_6
.p2align 4
_sp_384_get_point_33_avx2_6:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm13
addq $0x128, %rsi
movd %eax, %xmm15
movq $32, %rax
vpxor %ymm14, %ymm14, %ymm14
vpermd %ymm13, %ymm14, %ymm13
vpermd %ymm15, %ymm14, %ymm15
vpxor %ymm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %ymm4, %ymm4, %ymm4
vpxor %xmm5, %xmm5, %xmm5
vmovdqa %ymm15, %ymm14
L_384_get_point_33_avx2_6_start:
vpcmpeqd %ymm13, %ymm14, %ymm12
vpaddd %ymm15, %ymm14, %ymm14
vmovupd (%rsi), %ymm6
vmovdqu 32(%rsi), %xmm7
vmovupd 96(%rsi), %ymm8
vmovdqu 128(%rsi), %xmm9
vmovupd 192(%rsi), %ymm10
vmovdqu 224(%rsi), %xmm11
addq $0x128, %rsi
vpand %ymm12, %ymm6, %ymm6
vpand %xmm12, %xmm7, %xmm7
vpand %ymm12, %ymm8, %ymm8
vpand %xmm12, %xmm9, %xmm9
vpand %ymm12, %ymm10, %ymm10
vpand %xmm12, %xmm11, %xmm11
vpor %ymm6, %ymm0, %ymm0
vpor %xmm7, %xmm1, %xmm1
vpor %ymm8, %ymm2, %ymm2
vpor %xmm9, %xmm3, %xmm3
vpor %ymm10, %ymm4, %ymm4
vpor %xmm11, %xmm5, %xmm5
decq %rax
jnz L_384_get_point_33_avx2_6_start
vmovupd %ymm0, (%rdi)
vmovdqu %xmm1, 32(%rdi)
vmovupd %ymm2, 96(%rdi)
vmovdqu %xmm3, 128(%rdi)
vmovupd %ymm4, 192(%rdi)
vmovdqu %xmm5, 224(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_point_33_avx2_6,.-sp_384_get_point_33_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 384 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_reduce_order_avx2_6
.type sp_384_mont_reduce_order_avx2_6,@function
.align 16
sp_384_mont_reduce_order_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_reduce_order_avx2_6
.p2align 4
_sp_384_mont_reduce_order_avx2_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rax
xorq %r13, %r13
movq (%rdi), %r12
xorq %r11, %r11
L_mont_loop_order_avx2_6:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 8(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 16(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 24(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 32(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 40(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 48(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
adcxq %r13, %r9
movq %r9, 48(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 16(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 24(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 32(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 24(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 40(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 32(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 48(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 40(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 56(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 48(%rdi)
adcxq %r13, %r9
movq %r9, 56(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 24(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 32(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 40(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 48(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 56(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 64(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
adcxq %r13, %r9
movq %r9, 64(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 32(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 40(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 48(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 40(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 56(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 48(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 64(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 56(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 72(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 64(%rdi)
adcxq %r13, %r9
movq %r9, 72(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 40(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 48(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 56(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 64(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 72(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 80(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
adcxq %r13, %r9
movq %r9, 80(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r9
imulq %rax, %rdx
xorq %r11, %r11
# a[i+0] += m[0] * mu
mulxq (%rsi), %rcx, %r8
movq 48(%rdi), %r12
adcxq %rcx, %r9
adoxq %r8, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rcx, %r8
movq 56(%rdi), %r9
adcxq %rcx, %r12
adoxq %r8, %r9
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rcx, %r8
movq 64(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 56(%rdi)
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rcx, %r8
movq 72(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 64(%rdi)
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rcx, %r8
movq 80(%rdi), %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 72(%rdi)
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rcx, %r8
movq 88(%rdi), %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 80(%rdi)
adcxq %r13, %r9
movq %r9, 88(%rdi)
movq %r11, %r13
adoxq %r11, %r13
adcxq %r11, %r13
negq %r13
movq %rdi, %rax
addq $48, %rdi
movq (%rsi), %r8
movq %r12, %rdx
pextq %r13, %r8, %r8
subq %r8, %rdx
movq 8(%rsi), %r8
movq 8(%rdi), %rcx
pextq %r13, %r8, %r8
movq %rdx, (%rax)
sbbq %r8, %rcx
movq 16(%rsi), %rdx
movq 16(%rdi), %r8
pextq %r13, %rdx, %rdx
movq %rcx, 8(%rax)
sbbq %rdx, %r8
movq 24(%rsi), %rcx
movq 24(%rdi), %rdx
pextq %r13, %rcx, %rcx
movq %r8, 16(%rax)
sbbq %rcx, %rdx
movq 32(%rsi), %r8
movq 32(%rdi), %rcx
pextq %r13, %r8, %r8
movq %rdx, 24(%rax)
sbbq %r8, %rcx
movq 40(%rsi), %rdx
movq 40(%rdi), %r8
pextq %r13, %rdx, %rdx
movq %rcx, 32(%rax)
sbbq %rdx, %r8
movq %r8, 40(%rax)
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_mont_reduce_order_avx2_6,.-sp_384_mont_reduce_order_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
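/* Implementation note: pextq with a mask of all ones copies the source
 * unchanged and with a mask of zero yields zero, so "b[i] & m" costs one
 * flag-preserving instruction inside the sbbq borrow chain.  A C model
 * (illustrative only):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t cond_sub_6(uint64_t r[6], const uint64_t a[6],
 *                              const uint64_t b[6], uint64_t m)
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 6; i++) {
 *           t = (unsigned __int128)a[i] - (b[i] & m) - t;
 *           r[i] = (uint64_t)t;
 *           t = (t >> 64) & 1;              // borrow out
 *       }
 *       return 0 - (uint64_t)t;             // asm: sbbq %rax, %rax
 *   }
 */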
#ifndef __APPLE__
.text
.globl sp_384_cond_sub_avx2_6
.type sp_384_cond_sub_avx2_6,@function
.align 16
sp_384_cond_sub_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_cond_sub_avx2_6
.p2align 4
_sp_384_cond_sub_avx2_6:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq %r10, 40(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_384_cond_sub_avx2_6,.-sp_384_cond_sub_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_384_mont_div2_avx2_6
.type sp_384_mont_div2_avx2_6,@function
.align 16
sp_384_mont_div2_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_mont_div2_avx2_6
.p2align 4
_sp_384_mont_div2_avx2_6:
#endif /* __APPLE__ */
movq (%rsi), %r11
xorq %r10, %r10
movq %r11, %r8
andq $0x01, %r11
negq %r11
movq (%rdx), %rax
movq 8(%rdx), %rcx
movq (%rsi), %r8
movq 8(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
addq %rax, %r8
adcq %rcx, %r9
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rdx), %rax
movq 24(%rdx), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdx), %rax
movq 40(%rdx), %rcx
movq 32(%rsi), %r8
movq 40(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
adcq $0x00, %r10
movq (%rdi), %r8
movq 8(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, (%rdi)
movq 16(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 8(%rdi)
movq 24(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 16(%rdi)
movq 32(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 24(%rdi)
movq 40(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 32(%rdi)
shrdq $0x01, %r10, %r9
movq %r9, 40(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_mont_div2_avx2_6,.-sp_384_mont_div2_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible entry that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_384_get_entry_64_6
.type sp_384_get_entry_64_6,@function
.align 16
sp_384_get_entry_64_6:
#else
.section __TEXT,__text
.globl _sp_384_get_entry_64_6
.p2align 4
_sp_384_get_entry_64_6:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %rax
movd %edx, %xmm13
addq $0x60, %rsi
movd %eax, %xmm15
movq $63, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
movdqa %xmm15, %xmm14
L_384_get_entry_64_6_start_0:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
movdqu (%rsi), %xmm6
movdqu 16(%rsi), %xmm7
movdqu 32(%rsi), %xmm8
movdqu 48(%rsi), %xmm9
movdqu 64(%rsi), %xmm10
movdqu 80(%rsi), %xmm11
addq $0x60, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
decq %rax
jnz L_384_get_entry_64_6_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 96(%rdi)
movdqu %xmm4, 112(%rdi)
movdqu %xmm5, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_entry_64_6,.-sp_384_get_entry_64_6
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible entry that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_384_get_entry_64_avx2_6
.type sp_384_get_entry_64_avx2_6,@function
.align 16
sp_384_get_entry_64_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_get_entry_64_avx2_6
.p2align 4
_sp_384_get_entry_64_avx2_6:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm9
addq $0x60, %rsi
movd %eax, %xmm11
movq $0x40, %rax
vpxor %ymm10, %ymm10, %ymm10
vpermd %ymm9, %ymm10, %ymm9
vpermd %ymm11, %ymm10, %ymm11
vpxor %ymm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %xmm3, %xmm3, %xmm3
vmovdqa %ymm11, %ymm10
L_384_get_entry_64_avx2_6_start:
vpcmpeqd %ymm9, %ymm10, %ymm8
vpaddd %ymm11, %ymm10, %ymm10
vmovupd (%rsi), %ymm4
vmovdqu 32(%rsi), %xmm5
vmovupd 48(%rsi), %ymm6
vmovdqu 80(%rsi), %xmm7
addq $0x60, %rsi
vpand %ymm8, %ymm4, %ymm4
vpand %xmm8, %xmm5, %xmm5
vpand %ymm8, %ymm6, %ymm6
vpand %xmm8, %xmm7, %xmm7
vpor %ymm4, %ymm0, %ymm0
vpor %xmm5, %xmm1, %xmm1
vpor %ymm6, %ymm2, %ymm2
vpor %xmm7, %xmm3, %xmm3
decq %rax
jnz L_384_get_entry_64_avx2_6_start
vmovupd %ymm0, (%rdi)
vmovdqu %xmm1, 32(%rdi)
vmovupd %ymm2, 96(%rdi)
vmovdqu %xmm3, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_entry_64_avx2_6,.-sp_384_get_entry_64_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible entry that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_384_get_entry_65_6
.type sp_384_get_entry_65_6,@function
.align 16
sp_384_get_entry_65_6:
#else
.section __TEXT,__text
.globl _sp_384_get_entry_65_6
.p2align 4
_sp_384_get_entry_65_6:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %rax
movd %edx, %xmm13
addq $0x60, %rsi
movd %eax, %xmm15
movq $0x40, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
movdqa %xmm15, %xmm14
L_384_get_entry_65_6_start_0:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
movdqu (%rsi), %xmm6
movdqu 16(%rsi), %xmm7
movdqu 32(%rsi), %xmm8
movdqu 48(%rsi), %xmm9
movdqu 64(%rsi), %xmm10
movdqu 80(%rsi), %xmm11
addq $0x60, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
decq %rax
jnz L_384_get_entry_65_6_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 96(%rdi)
movdqu %xmm4, 112(%rdi)
movdqu %xmm5, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_entry_65_6,.-sp_384_get_entry_65_6
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible entry that could be being copied.
*
* r Point to copy into.
* table Table - start of the entries to access
* idx Index of entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_384_get_entry_65_avx2_6
.type sp_384_get_entry_65_avx2_6,@function
.align 16
sp_384_get_entry_65_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_get_entry_65_avx2_6
.p2align 4
_sp_384_get_entry_65_avx2_6:
#endif /* __APPLE__ */
movq $0x01, %rax
movd %edx, %xmm9
addq $0x60, %rsi
movd %eax, %xmm11
movq $0x41, %rax
vpxor %ymm10, %ymm10, %ymm10
vpermd %ymm9, %ymm10, %ymm9
vpermd %ymm11, %ymm10, %ymm11
vpxor %ymm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %xmm3, %xmm3, %xmm3
vmovdqa %ymm11, %ymm10
L_384_get_entry_65_avx2_6_start:
vpcmpeqd %ymm9, %ymm10, %ymm8
vpaddd %ymm11, %ymm10, %ymm10
vmovupd (%rsi), %ymm4
vmovdqu 32(%rsi), %xmm5
vmovupd 48(%rsi), %ymm6
vmovdqu 80(%rsi), %xmm7
addq $0x60, %rsi
vpand %ymm8, %ymm4, %ymm4
vpand %xmm8, %xmm5, %xmm5
vpand %ymm8, %ymm6, %ymm6
vpand %xmm8, %xmm7, %xmm7
vpor %ymm4, %ymm0, %ymm0
vpor %xmm5, %xmm1, %xmm1
vpor %ymm6, %ymm2, %ymm2
vpor %xmm7, %xmm3, %xmm3
decq %rax
jnz L_384_get_entry_65_avx2_6_start
vmovupd %ymm0, (%rdi)
vmovdqu %xmm1, 32(%rdi)
vmovupd %ymm2, 96(%rdi)
vmovdqu %xmm3, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_get_entry_65_avx2_6,.-sp_384_get_entry_65_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Add 1 to a. (a = a + 1)
*
* a A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_add_one_6
.type sp_384_add_one_6,@function
.align 16
sp_384_add_one_6:
#else
.section __TEXT,__text
.globl _sp_384_add_one_6
.p2align 4
_sp_384_add_one_6:
#endif /* __APPLE__ */
addq $0x01, (%rdi)
adcq $0x00, 8(%rdi)
adcq $0x00, 16(%rdi)
adcq $0x00, 24(%rdi)
adcq $0x00, 32(%rdi)
adcq $0x00, 40(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_add_one_6,.-sp_384_add_one_6
#endif /* __APPLE__ */
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
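/* Reference sketch (illustrative only, with the output fixed at six
 * words): full words are taken from the tail of the big-endian buffer
 * and byte swapped, any 1..7 leftover top bytes are folded into one
 * word, and the rest of r is zeroed.  __builtin_bswap64 stands in for
 * bswapq.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static void from_bin(uint64_t r[6], const unsigned char* a, int n)
 *   {
 *       int i = 0, j = n;
 *       while (j >= 8) {                    // full words from the end
 *           uint64_t w;
 *           j -= 8;
 *           memcpy(&w, a + j, 8);
 *           r[i++] = __builtin_bswap64(w);
 *       }
 *       if (j > 0) {                        // leftover top bytes
 *           uint64_t w = 0;
 *           for (int k = 0; k < j; k++)
 *               w = (w << 8) | a[k];        // most significant first
 *           r[i++] = w;
 *       }
 *       while (i < 6)                       // zero the remainder
 *           r[i++] = 0;
 *   }
 */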
#ifndef __APPLE__
.text
.globl sp_384_from_bin_bswap
.type sp_384_from_bin_bswap,@function
.align 16
sp_384_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_384_from_bin_bswap
.p2align 4
_sp_384_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $48, %r10
xorq %r11, %r11
jmp L_384_from_bin_bswap_64_end
L_384_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_384_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_384_from_bin_bswap_64_start
jmp L_384_from_bin_bswap_8_end
L_384_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_384_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_384_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_384_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_384_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_384_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_384_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_384_from_bin_bswap_zero_end
L_384_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_384_from_bin_bswap_zero_start
L_384_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_384_from_bin_bswap,.-sp_384_from_bin_bswap
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
* Uses the movbe instruction which is an optional instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_384_from_bin_movbe
.type sp_384_from_bin_movbe,@function
.align 16
sp_384_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_384_from_bin_movbe
.p2align 4
_sp_384_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $48, %r10
jmp L_384_from_bin_movbe_64_end
L_384_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_384_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_384_from_bin_movbe_64_start
jmp L_384_from_bin_movbe_8_end
L_384_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_384_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_384_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_384_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_384_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_384_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_384_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_384_from_bin_movbe_zero_end
L_384_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_384_from_bin_movbe_zero_start
L_384_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_384_from_bin_movbe,.-sp_384_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 48
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_384_to_bin_bswap_6
.type sp_384_to_bin_bswap_6,@function
.align 16
sp_384_to_bin_bswap_6:
#else
.section __TEXT,__text
.globl _sp_384_to_bin_bswap_6
.p2align 4
_sp_384_to_bin_bswap_6:
#endif /* __APPLE__ */
movq 40(%rdi), %rdx
movq 32(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
repz retq
#ifndef __APPLE__
.size sp_384_to_bin_bswap_6,.-sp_384_to_bin_bswap_6
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
* Fixed length number of bytes written: 48
* Uses the movbe instruction which is optional.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_384_to_bin_movbe_6
.type sp_384_to_bin_movbe_6,@function
.align 16
sp_384_to_bin_movbe_6:
#else
.section __TEXT,__text
.globl _sp_384_to_bin_movbe_6
.p2align 4
_sp_384_to_bin_movbe_6:
#endif /* __APPLE__ */
movbeq 40(%rdi), %rdx
movbeq 32(%rdi), %rax
movq %rdx, (%rsi)
movq %rax, 8(%rsi)
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, 16(%rsi)
movq %rax, 24(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 32(%rsi)
movq %rax, 40(%rsi)
repz retq
#ifndef __APPLE__
.size sp_384_to_bin_movbe_6,.-sp_384_to_bin_movbe_6
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Sub b from a into a. (a -= b)
*
* a A single precision integer and result.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_384_sub_in_place_6
.type sp_384_sub_in_place_6,@function
.align 16
sp_384_sub_in_place_6:
#else
.section __TEXT,__text
.globl _sp_384_sub_in_place_6
.p2align 4
_sp_384_sub_in_place_6:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
subq %rdx, (%rdi)
sbbq %rcx, 8(%rdi)
sbbq %r8, 16(%rdi)
sbbq %r9, 24(%rdi)
sbbq %r10, 32(%rdi)
sbbq %r11, 40(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_384_sub_in_place_6,.-sp_384_sub_in_place_6
#endif /* __APPLE__ */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
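/* Reference sketch (illustrative only): one pass with a 128-bit partial
 * product per word; r gains a seventh word for the final carry.
 *
 *   #include <stdint.h>
 *
 *   static void mul_d_6(uint64_t r[7], const uint64_t a[6], uint64_t b)
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 6; i++) {
 *           t += (unsigned __int128)a[i] * b;
 *           r[i] = (uint64_t)t;
 *           t >>= 64;                       // carry into the next word
 *       }
 *       r[6] = (uint64_t)t;
 *   }
 */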
#ifndef __APPLE__
.text
.globl sp_384_mul_d_6
.type sp_384_mul_d_6,@function
.align 16
sp_384_mul_d_6:
#else
.section __TEXT,__text
.globl _sp_384_mul_d_6
.p2align 4
_sp_384_mul_d_6:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
movq %r10, 40(%rdi)
movq %r8, 48(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_mul_d_6,.-sp_384_mul_d_6
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
#ifndef __APPLE__
.text
.globl sp_384_mul_d_avx2_6
.type sp_384_mul_d_avx2_6,@function
.align 16
sp_384_mul_d_avx2_6:
#else
.section __TEXT,__text
.globl _sp_384_mul_d_avx2_6
.p2align 4
_sp_384_mul_d_avx2_6:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 40(%rdi)
movq %r9, 48(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_mul_d_avx2_6,.-sp_384_mul_d_avx2_6
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
*
* d1 The high order half of the number to divide.
* d0 The low order half of the number to divide.
 * div The divisor.
* returns the result of the division.
*/
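/* C model (illustrative only): a single 128-by-64-bit divide.  Note that
 * divq raises #DE when the quotient overflows 64 bits, so callers must
 * ensure d1 < div.
 *
 *   #include <stdint.h>
 *
 *   static uint64_t div_word(uint64_t d1, uint64_t d0, uint64_t div)
 *   {
 *       unsigned __int128 d = ((unsigned __int128)d1 << 64) | d0;
 *       return (uint64_t)(d / div);
 *   }
 */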
#ifndef __APPLE__
.text
.globl div_384_word_asm_6
.type div_384_word_asm_6,@function
.align 16
div_384_word_asm_6:
#else
.section __TEXT,__text
.globl _div_384_word_asm_6
.p2align 4
_div_384_word_asm_6:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_384_word_asm_6,.-div_384_word_asm_6
#endif /* __APPLE__ */
#endif /* _WIN64 */
/* Shift number right by 1 bit. (r = a >> 1)
*
* r Result of right shift by 1.
* a Number to shift.
*/
#ifndef __APPLE__
.text
.globl sp_384_rshift1_6
.type sp_384_rshift1_6,@function
.align 16
sp_384_rshift1_6:
#else
.section __TEXT,__text
.globl _sp_384_rshift1_6
.p2align 4
_sp_384_rshift1_6:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrq $0x01, %r10
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
repz retq
#ifndef __APPLE__
.size sp_384_rshift1_6,.-sp_384_rshift1_6
#endif /* __APPLE__ */
/* Divide the number by 2 mod the prime. (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus
*/
#ifndef __APPLE__
.text
.globl sp_384_div2_mod_6
.type sp_384_div2_mod_6,@function
.align 16
sp_384_div2_mod_6:
#else
.section __TEXT,__text
.globl _sp_384_div2_mod_6
.p2align 4
_sp_384_div2_mod_6:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
movq (%rdx), %r12
movq 8(%rdx), %r13
movq 16(%rdx), %r14
movq 24(%rdx), %r15
movq 32(%rdx), %rbx
movq 40(%rdx), %rbp
movq %rax, %rdx
andq $0x01, %rdx
je L_384_mod_inv_6_div2_mod_no_add
addq %r12, %rax
adcq %r13, %rcx
adcq %r14, %r8
adcq %r15, %r9
adcq %rbx, %r10
adcq %rbp, %r11
movq $0x00, %rdx
adcq $0x00, %rdx
L_384_mod_inv_6_div2_mod_no_add:
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrdq $0x01, %r11, %r10
shrdq $0x01, %rdx, %r11
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_384_div2_mod_6,.-sp_384_div2_mod_6
#endif /* __APPLE__ */
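/* Count the bits used by the number.
 *
 * a A single precision integer.
 * returns the 1-based position of the highest set bit, or 0 when a is
 * zero.
 */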
#ifndef __APPLE__
.text
.globl sp_384_num_bits_6
.type sp_384_num_bits_6,@function
.align 16
sp_384_num_bits_6:
#else
.section __TEXT,__text
.globl _sp_384_num_bits_6
.p2align 4
_sp_384_num_bits_6:
#endif /* __APPLE__ */
xorq %rax, %rax
movq 40(%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_320
movq $-1, %rax
bsr %rdx, %rax
addq $0x141, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_320:
movq 32(%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_256
movq $-1, %rax
bsr %rdx, %rax
addq $0x101, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_256:
movq 24(%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_192
movq $-1, %rax
bsr %rdx, %rax
addq $0xc1, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_192:
movq 16(%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_128
movq $-1, %rax
bsr %rdx, %rax
addq $0x81, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_128:
movq 8(%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_64
movq $-1, %rax
bsr %rdx, %rax
addq $0x41, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_64:
movq (%rdi), %rdx
cmpq $0x00, %rdx
je L_384_num_bits_6_end_0
movq $-1, %rax
bsr %rdx, %rax
addq $0x01, %rax
jmp L_384_num_bits_6_done
L_384_num_bits_6_end_0:
L_384_num_bits_6_done:
repz retq
#ifndef __APPLE__
.size sp_384_num_bits_6,.-sp_384_num_bits_6
#endif /* __APPLE__ */
#endif /* WOLFSSL_SP_384 */
#ifdef WOLFSSL_SP_521
/* Multiply a and b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
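/* Reference sketch (illustrative only): the product is built column by
 * column - column k sums every a[i] * b[j] with i + j == k into a
 * three-word accumulator.  The generated code also stages the low words
 * on the stack so that r may alias a or b; this sketch ignores aliasing.
 *
 *   #include <stdint.h>
 *
 *   static void mul_9(uint64_t r[18], const uint64_t a[9],
 *                     const uint64_t b[9])
 *   {
 *       uint64_t lo = 0, hi = 0, ov = 0;        // column accumulator
 *       for (int k = 0; k <= 16; k++) {
 *           int i0 = (k < 9) ? 0 : k - 8;
 *           for (int i = i0; i <= k && i < 9; i++) {
 *               unsigned __int128 p = (unsigned __int128)a[i] * b[k - i];
 *               uint64_t plo = (uint64_t)p, phi = (uint64_t)(p >> 64);
 *               lo += plo; phi += (lo < plo);   // carry up into phi
 *               hi += phi; ov  += (hi < phi);   // and on into ov
 *           }
 *           r[k] = lo;
 *           lo = hi; hi = ov; ov = 0;           // slide the window
 *       }
 *       r[17] = lo;
 *   }
 */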
#ifndef __APPLE__
.text
.globl sp_521_mul_9
.type sp_521_mul_9,@function
.align 16
sp_521_mul_9:
#else
.section __TEXT,__text
.globl _sp_521_mul_9
.p2align 4
_sp_521_mul_9:
#endif /* __APPLE__ */
movq %rdx, %rcx
subq $0x48, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rsp)
# A[0] * B[6]
movq 48(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[0]
movq (%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 48(%rsp)
# A[0] * B[7]
movq 56(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[6]
movq 48(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[1]
movq 8(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[0]
movq (%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 56(%rsp)
# A[0] * B[8]
movq 64(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[7]
movq 56(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[6]
movq 48(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[2]
movq 16(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[1]
movq 8(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[0]
movq (%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 64(%rsp)
# A[1] * B[8]
movq 64(%rcx), %rax
mulq 8(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[7]
movq 56(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[6]
movq 48(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[3]
movq 24(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[2]
movq 16(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[1]
movq 8(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 72(%rdi)
# A[2] * B[8]
movq 64(%rcx), %rax
mulq 16(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[7]
movq 56(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[6]
movq 48(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[4]
movq 32(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[3]
movq 24(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[2]
movq 16(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 80(%rdi)
# A[3] * B[8]
movq 64(%rcx), %rax
mulq 24(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[7]
movq 56(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[6]
movq 48(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[5]
movq 40(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[4]
movq 32(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[3]
movq 24(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 88(%rdi)
# A[4] * B[8]
movq 64(%rcx), %rax
mulq 32(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[7]
movq 56(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[6]
movq 48(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[5]
movq 40(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[4]
movq 32(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 96(%rdi)
# A[5] * B[8]
movq 64(%rcx), %rax
mulq 40(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[7]
movq 56(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[6]
movq 48(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[5]
movq 40(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 104(%rdi)
# A[6] * B[8]
movq 64(%rcx), %rax
mulq 48(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[7]
movq 56(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[6]
movq 48(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 112(%rdi)
# A[7] * B[8]
movq 64(%rcx), %rax
mulq 56(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[7]
movq 56(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 120(%rdi)
# A[8] * B[8]
movq 64(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq %r9, 128(%rdi)
movq %r10, 136(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r8
movq 56(%rsp), %r9
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsp), %rax
movq %rax, 64(%rdi)
addq $0x48, %rsp
repz retq
#ifndef __APPLE__
.size sp_521_mul_9,.-sp_521_mul_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
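/* Implementation note: this variant keeps two independent carry chains
 * in flight.  mulxq multiplies without touching flags, adcxq accumulates
 * through CF only and adoxq through OF only, so the low and high halves
 * of each partial product can be added in parallel without the flag
 * hazards a plain addq/adcq sequence would create.
 */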
#ifndef __APPLE__
.text
.globl sp_521_mul_avx2_9
.type sp_521_mul_avx2_9,@function
.align 16
sp_521_mul_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mul_avx2_9
.p2align 4
_sp_521_mul_avx2_9:
#endif /* __APPLE__ */
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
movq %rdx, %rbp
subq $0x48, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbx
cmovne %rdi, %rbx
cmpq %rdi, %rbp
cmove %rsp, %rbx
addq $0x48, %rdi
xorq %r13, %r13
movq (%rsi), %rdx
# A[0] * B[0]
mulx (%rbp), %r8, %r9
# A[0] * B[1]
mulx 8(%rbp), %rax, %r10
movq %r8, (%rbx)
adcxq %rax, %r9
# A[0] * B[2]
mulx 16(%rbp), %rax, %r11
movq %r9, 8(%rbx)
adcxq %rax, %r10
movq %r10, 16(%rbx)
# A[0] * B[3]
mulx 24(%rbp), %rax, %r8
adcxq %rax, %r11
# A[0] * B[4]
mulx 32(%rbp), %rax, %r9
movq %r11, 24(%rbx)
adcxq %rax, %r8
# A[0] * B[5]
mulx 40(%rbp), %rax, %r10
movq %r8, 32(%rbx)
adcxq %rax, %r9
movq %r9, 40(%rbx)
# A[0] * B[6]
mulx 48(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[7]
mulx 56(%rbp), %rax, %r8
movq %r10, 48(%rbx)
adcxq %rax, %r11
# A[0] * B[8]
mulx 64(%rbp), %rax, %r9
movq %r11, 56(%rbx)
adcxq %rax, %r8
adcxq %r13, %r9
movq %r13, %r12
adcxq %r13, %r12
movq %r8, 64(%rbx)
movq %r9, (%rdi)
movq 8(%rsi), %rdx
movq 8(%rbx), %r9
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r8
# A[1] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 8(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 24(%rbx)
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
# A[1] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rbx)
movq 64(%rbx), %r8
movq (%rdi), %r9
# A[1] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[1] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
movq %r13, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r12, %r10
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq 16(%rsi), %rdx
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r8
movq 40(%rbx), %r9
# A[2] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[2] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rbx)
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
# A[2] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 56(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[2] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r9, (%rdi)
movq %r13, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r12, %r11
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq 24(%rsi), %rdx
movq 24(%rbx), %r11
movq 32(%rbx), %r8
movq 40(%rbx), %r9
movq 48(%rbx), %r10
# A[3] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[3] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rbx)
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rdi), %r9
# A[3] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[3] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 64(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[3] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
movq %r13, %r8
adcxq %rax, %r11
adoxq %rcx, %r8
adcxq %r12, %r8
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r11, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rsi), %rdx
movq 32(%rbx), %r8
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
# A[4] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rbx)
movq 64(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[4] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[4] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r8
# A[4] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[4] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
movq %r13, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r12, %r9
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq 40(%rsi), %rdx
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
# A[5] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 56(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[5] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[5] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[5] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
movq %r13, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r12, %r10
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq 48(%rsi), %rdx
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rdi), %r9
# A[6] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[6] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 64(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r8
# A[6] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[6] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r9, 32(%rdi)
movq %r13, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r12, %r11
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq 56(%rsi), %rdx
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[7] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[7] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[7] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[7] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r10
movq 48(%rdi), %r11
# A[7] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 32(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r10, 40(%rdi)
movq %r13, %r8
adcxq %rax, %r11
adoxq %rcx, %r8
adcxq %r12, %r8
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r11, 48(%rdi)
movq %r8, 56(%rdi)
movq 64(%rsi), %rdx
movq 64(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[8] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[8] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[8] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 24(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rdi)
movq 48(%rdi), %r11
movq 56(%rdi), %r8
# A[8] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 40(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[8] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r11, 48(%rdi)
movq %r13, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r12, %r9
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
subq $0x48, %rdi
cmpq %rdi, %rsi
je L_start_521_mul_avx2_9
cmpq %rdi, %rbp
jne L_end_521_mul_avx2_9
L_start_521_mul_avx2_9:
vmovdqu (%rbx), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbx), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbx), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbx), %xmm0
vmovups %xmm0, 48(%rdi)
movq 64(%rbx), %rax
movq %rax, 64(%rdi)
L_end_521_mul_avx2_9:
addq $0x48, %rsp
popq %r13
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_521_mul_avx2_9,.-sp_521_mul_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
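/* The routine below is a schoolbook squaring that exploits symmetry: each
 * cross product A[i] * A[j] with i < j is computed once and added twice,
 * and each square term A[i] * A[i] is added once, column by column, with
 * the low half of the result staged on the stack. A minimal C sketch of
 * the same column arithmetic (illustrative only; assumes a 64-bit
 * sp_digit, the GCC/Clang unsigned __int128 extension, and that r does
 * not alias a):
 *
 *   #include <stdint.h>
 *   typedef uint64_t sp_digit;
 *   void sqr_9(sp_digit r[18], const sp_digit a[9])
 *   {
 *       unsigned __int128 acc = 0;  // low 128 bits of the column sum
 *       uint64_t over = 0;          // count of 2^128 overflows
 *       for (int k = 0; k <= 16; k++) {
 *           for (int i = (k <= 8) ? 0 : (k - 8); i <= k / 2; i++) {
 *               unsigned __int128 p = (unsigned __int128)a[i] * a[k - i];
 *               int times = (i < k - i) ? 2 : 1;  // double cross terms
 *               while (times--) {
 *                   acc += p;
 *                   if (acc < p) over++;          // carried out of 2^128
 *               }
 *           }
 *           r[k] = (sp_digit)acc;
 *           acc = (acc >> 64) | ((unsigned __int128)over << 64);
 *           over = 0;
 *       }
 *       r[17] = (sp_digit)acc;      // final carry limb
 *   }
 */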
#ifndef __APPLE__
.text
.globl sp_521_sqr_9
.type sp_521_sqr_9,@function
.align 16
sp_521_sqr_9:
#else
.section __TEXT,__text
.globl _sp_521_sqr_9
.p2align 4
_sp_521_sqr_9:
#endif /* __APPLE__ */
pushq %r12
subq $0x48, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rsp)
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 40(%rsp)
# A[0] * A[6]
movq 48(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 48(%rsp)
# A[0] * A[7]
movq 56(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[6]
movq 48(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 56(%rsp)
# A[0] * A[8]
movq 64(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[7]
movq 56(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[6]
movq 48(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 64(%rsp)
# A[1] * A[8]
movq 64(%rsi), %rax
mulq 8(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[2] * A[7]
movq 56(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[6]
movq 48(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 72(%rdi)
# A[2] * A[8]
movq 64(%rsi), %rax
mulq 16(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[3] * A[7]
movq 56(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[6]
movq 48(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 80(%rdi)
# A[3] * A[8]
movq 64(%rsi), %rax
mulq 24(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[4] * A[7]
movq 56(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[6]
movq 48(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 88(%rdi)
# A[4] * A[8]
movq 64(%rsi), %rax
mulq 32(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * A[7]
movq 56(%rsi), %rax
mulq 40(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * A[6]
movq 48(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 96(%rdi)
# A[5] * A[8]
movq 64(%rsi), %rax
mulq 40(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[6] * A[7]
movq 56(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 104(%rdi)
# A[6] * A[8]
movq 64(%rsi), %rax
mulq 48(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[7] * A[7]
movq 56(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 112(%rdi)
# A[7] * A[8]
movq 64(%rsi), %rax
mulq 56(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 120(%rdi)
# A[8] * A[8]
movq 64(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 128(%rdi)
movq %r9, 136(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r10
movq 56(%rsp), %r11
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsp), %rax
movq %rax, 64(%rdi)
addq $0x48, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_sqr_9,.-sp_521_sqr_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
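/* The AVX2 variant below relies on the BMI2/ADX instructions mulx, adcx
 * and adox: mulx computes a full 64x64->128 product without modifying
 * any flags, while adcx propagates carries through CF only and adox
 * through OF only. Because the two flags are independent, two carry
 * chains (one for the low halves and one for the high halves of the
 * partial products) run interleaved with no flag save/restore. A rough
 * C intrinsics picture of one step (illustrative only; compilers are
 * free to lower _addcarryx_u64 to plain adc instead of adcx/adox):
 *
 *   #include <immintrin.h>
 *   unsigned long long lo, hi;
 *   lo = _mulx_u64(a_i, b_j, &hi);            // flags untouched
 *   c1 = _addcarryx_u64(c1, acc0, lo, &acc0); // CF chain (adcx)
 *   c2 = _addcarryx_u64(c2, acc1, hi, &acc1); // OF chain (adox)
 */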
#ifndef __APPLE__
.text
.globl sp_521_sqr_avx2_9
.type sp_521_sqr_avx2_9,@function
.align 16
sp_521_sqr_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_sqr_avx2_9
.p2align 4
_sp_521_sqr_avx2_9:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $0x48, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbp
cmovne %rdi, %rbp
addq $0x48, %rdi
xorq %r10, %r10
# Diagonal 1
# Zero into %r9
# A[1] x A[0]
movq (%rsi), %rdx
mulxq 8(%rsi), %r8, %r9
movq %r8, 8(%rbp)
# Zero into %r8
# A[2] x A[0]
mulxq 16(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rbp)
# No load %r12 - %r9
# A[3] x A[0]
mulxq 24(%rsi), %rax, %r12
adcxq %rax, %r8
adoxq %r10, %r12
movq %r8, 24(%rbp)
# No load %r13 - %r8
# A[4] x A[0]
mulxq 32(%rsi), %rax, %r13
adcxq %rax, %r12
adoxq %r10, %r13
# No store %r12 - %r9
# No load %r14 - %r9
# A[5] x A[0]
mulxq 40(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %r10, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[6] x A[0]
mulxq 48(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %r10, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[7] x A[0]
mulxq 56(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %r10, %rbx
# No store %r15 - %r8
# Zero into %r8
# A[8] x A[0]
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %rbx
adoxq %r10, %r8
# No store %rbx - %r9
# Zero into %r9
# A[8] x A[1]
movq 8(%rsi), %rdx
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, (%rdi)
# Carry
adcxq %r10, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 8(%rdi)
# Diagonal 2
movq 24(%rbp), %r9
# No load %r12 - %r8
# A[2] x A[1]
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r12
movq %r9, 24(%rbp)
# No load %r13 - %r9
# A[3] x A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# No store %r12 - %r8
# No load %r14 - %r8
# A[4] x A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r9
# No load %r15 - %r9
# A[5] x A[1]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r8
# No load %rbx - %r8
# A[6] x A[1]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rdi), %r9
# A[7] x A[1]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rdi), %r8
# A[7] x A[2]
movq 16(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rdi)
# Zero into %r9
# A[7] x A[3]
movq 24(%rsi), %rdx
mulxq 56(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 8(%rdi)
# Zero into %r8
# A[7] x A[4]
movq 32(%rsi), %rdx
mulxq 56(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rdi)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 24(%rdi)
# Diagonal 3
# No load %r14 - %r9
# A[3] x A[2]
movq 16(%rsi), %rdx
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[4] x A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[5] x A[2]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r8
movq (%rdi), %r8
# A[6] x A[2]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# No store %rbx - %r9
movq 8(%rdi), %r9
# A[6] x A[3]
movq 24(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rdi)
movq 16(%rdi), %r8
# A[6] x A[4]
movq 32(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq 24(%rdi), %r9
# A[6] x A[5]
movq 40(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 16(%rdi)
# Zero into %r8
# A[8] x A[4]
movq 32(%rsi), %rdx
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 24(%rdi)
# Zero into %r9
# A[8] x A[5]
movq 40(%rsi), %rdx
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 32(%rdi)
# Carry
adcxq %r11, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 40(%rdi)
# Diagonal 4
# No load %rbx - %r8
# A[4] x A[3]
movq 24(%rsi), %rdx
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rdi), %r9
# A[5] x A[3]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rdi), %r8
# A[5] x A[4]
movq 32(%rsi), %rdx
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rdi)
movq 16(%rdi), %r9
# A[8] x A[2]
movq 16(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 8(%rdi)
movq 24(%rdi), %r8
# A[8] x A[3]
movq 24(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 16(%rdi)
movq 32(%rdi), %r9
# A[7] x A[5]
movq 40(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rdi)
movq 40(%rdi), %r8
# A[7] x A[6]
movq 48(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 32(%rdi)
# Zero into %r9
# A[8] x A[6]
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 40(%rdi)
# Zero into %r8
# A[8] x A[7]
movq 56(%rsi), %rdx
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 48(%rdi)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 56(%rdi)
movq %r11, 64(%rdi)
# Double and Add in A[i] x A[i]
movq 8(%rbp), %r9
# A[0] x A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rbp)
adoxq %r9, %r9
adcxq %rcx, %r9
movq %r9, 8(%rbp)
movq 16(%rbp), %r8
movq 24(%rbp), %r9
# A[1] x A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rbp)
movq %r9, 24(%rbp)
# A[2] x A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r12, %r12
adoxq %r13, %r13
adcxq %rax, %r12
adcxq %rcx, %r13
# A[3] x A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r14, %r14
adoxq %r15, %r15
adcxq %rax, %r14
adcxq %rcx, %r15
movq (%rdi), %r9
# A[4] x A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %rbx, %rbx
adoxq %r9, %r9
adcxq %rax, %rbx
adcxq %rcx, %r9
movq %r9, (%rdi)
movq 8(%rdi), %r8
movq 16(%rdi), %r9
# A[5] x A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq 24(%rdi), %r8
movq 32(%rdi), %r9
# A[6] x A[6]
movq 48(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq 40(%rdi), %r8
movq 48(%rdi), %r9
# A[7] x A[7]
movq 56(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq 56(%rdi), %r8
movq 64(%rdi), %r9
# A[8] x A[8]
movq 64(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
movq %r12, -40(%rdi)
movq %r13, -32(%rdi)
movq %r14, -24(%rdi)
movq %r15, -16(%rdi)
movq %rbx, -8(%rdi)
subq $0x48, %rdi
cmpq %rdi, %rsi
jne L_end_521_sqr_avx2_9
vmovdqu (%rbp), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbp), %xmm0
vmovups %xmm0, 16(%rdi)
L_end_521_sqr_avx2_9:
addq $0x48, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_521_sqr_avx2_9,.-sp_521_sqr_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
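/* The addition below is a straight ripple-carry chain: loads and stores
 * are interleaved with adcq so the carry flag threads through all nine
 * limbs, and the final carry is returned in %rax. A minimal C sketch of
 * the same contract (illustrative only; assumes a 64-bit sp_digit and
 * the unsigned __int128 extension):
 *
 *   #include <stdint.h>
 *   typedef uint64_t sp_digit;
 *   sp_digit add_9(sp_digit r[9], const sp_digit a[9], const sp_digit b[9])
 *   {
 *       unsigned __int128 t = 0;
 *       for (int i = 0; i < 9; i++) {
 *           t += (unsigned __int128)a[i] + b[i];
 *           r[i] = (sp_digit)t;
 *           t >>= 64;               // carry into the next limb
 *       }
 *       return (sp_digit)t;         // final carry, 0 or 1
 *   }
 */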
#ifndef __APPLE__
.text
.globl sp_521_add_9
.type sp_521_add_9,@function
.align 16
sp_521_add_9:
#else
.section __TEXT,__text
.globl _sp_521_add_9
.p2align 4
_sp_521_add_9:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq %rcx, 64(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_521_add_9,.-sp_521_add_9
#endif /* __APPLE__ */
/* Sub b from a into r. (r = a - b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
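/* The borrow chain mirrors the addition above, except that the final
 * `sbbq %rax, %rax` spreads the last borrow across the whole register,
 * returning 0 on no borrow and -1 (all ones) on borrow. Sketch under the
 * same assumptions as the addition sketch:
 *
 *   sp_digit sub_9(sp_digit r[9], const sp_digit a[9], const sp_digit b[9])
 *   {
 *       sp_digit borrow = 0;
 *       for (int i = 0; i < 9; i++) {
 *           unsigned __int128 t = (unsigned __int128)a[i] - b[i] - borrow;
 *           r[i] = (sp_digit)t;
 *           borrow = (sp_digit)(t >> 64) & 1;  // 1 if the limb underflowed
 *       }
 *       return (sp_digit)0 - borrow;           // 0 or all-ones mask
 *   }
 */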
#ifndef __APPLE__
.text
.globl sp_521_sub_9
.type sp_521_sub_9,@function
.align 16
sp_521_sub_9:
#else
.section __TEXT,__text
.globl _sp_521_sub_9
.p2align 4
_sp_521_sub_9:
#endif /* __APPLE__ */
movq (%rsi), %rcx
subq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
sbbq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
sbbq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
sbbq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
sbbq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
sbbq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
sbbq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
sbbq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
sbbq 64(%rdx), %rcx
movq %rcx, 64(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_521_sub_9,.-sp_521_sub_9
#endif /* __APPLE__ */
/* Conditionally copy a into r using the mask m.
* m is -1 to copy and 0 when not.
*
* r A single precision number to copy over.
* a A single precision number to copy.
* m Mask value to apply.
*/
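/* Branch-free conditional copy: r is replaced by a exactly when m is all
 * ones, using the identity r ^ ((r ^ a) & m). The limb-level idea:
 *
 *   for (int i = 0; i < 9; i++)
 *       r[i] ^= (r[i] ^ a[i]) & m;  // m == -1: r[i] = a[i]; m == 0: no-op
 */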
#ifndef __APPLE__
.text
.globl sp_521_cond_copy_9
.type sp_521_cond_copy_9,@function
.align 16
sp_521_cond_copy_9:
#else
.section __TEXT,__text
.globl _sp_521_cond_copy_9
.p2align 4
_sp_521_cond_copy_9:
#endif /* __APPLE__ */
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
movq 32(%rdi), %r10
xorq (%rsi), %rax
xorq 8(%rsi), %rcx
xorq 16(%rsi), %r8
xorq 24(%rsi), %r9
xorq 32(%rsi), %r10
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
andq %rdx, %r10
xorq %rax, (%rdi)
xorq %rcx, 8(%rdi)
xorq %r8, 16(%rdi)
xorq %r9, 24(%rdi)
xorq %r10, 32(%rdi)
movq 40(%rdi), %rax
movq 48(%rdi), %rcx
movq 56(%rdi), %r8
movq 64(%rdi), %r9
xorq 40(%rsi), %rax
xorq 48(%rsi), %rcx
xorq 56(%rsi), %r8
xorq 64(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, 40(%rdi)
xorq %rcx, 48(%rdi)
xorq %r8, 56(%rdi)
xorq %r9, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_cond_copy_9,.-sp_521_cond_copy_9
#endif /* __APPLE__ */
/* Multiply two Montgomery form numbers mod the modulus (prime).
* (r = a * b mod m)
*
* r Result of multiplication.
* a First number to multiply in Montgomery form.
* b Second number to multiply in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
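/* For P-521 the modulus is the Mersenne prime p = 2^521 - 1, so the
 * Montgomery parameters m and mp are not actually needed here (note the
 * routine immediately overwrites %rcx, which held m): it computes the
 * full 18-limb product on the stack and then reduces with the identity
 *
 *   x = lo + hi * 2^521  ==>  x ≡ lo + hi (mod 2^521 - 1).
 *
 * Since 521 = 8*64 + 9, hi = x >> 521 is extracted by the chain of
 * `shrdq $9` instructions below, added to lo (the low eight limbs plus
 * the low 9 bits of limb 8), and the small remaining overflow is folded
 * back a second time.
 */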
#ifndef __APPLE__
.text
.globl sp_521_mont_mul_9
.type sp_521_mont_mul_9,@function
.align 16
sp_521_mont_mul_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_mul_9
.p2align 4
_sp_521_mont_mul_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
movq %rdx, %rcx
subq $0x90, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r13, %r13
movq %rax, (%rsp)
movq %rdx, %r12
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
movq %r12, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r12, %r12
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
movq %r13, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %r11, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
movq %r12, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r12, %r12
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
movq %r13, 40(%rsp)
# A[0] * B[6]
movq 48(%rcx), %rax
mulq (%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[6] * B[0]
movq (%rcx), %rax
mulq 48(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %r11, 48(%rsp)
# A[0] * B[7]
movq 56(%rcx), %rax
mulq (%rsi)
xorq %r11, %r11
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[1] * B[6]
movq 48(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[6] * B[1]
movq 8(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[7] * B[0]
movq (%rcx), %rax
mulq 56(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
movq %r12, 56(%rsp)
# A[0] * B[8]
movq 64(%rcx), %rax
mulq (%rsi)
xorq %r12, %r12
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[1] * B[7]
movq 56(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * B[6]
movq 48(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * B[2]
movq 16(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * B[1]
movq 8(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * B[0]
movq (%rcx), %rax
mulq 64(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
movq %r13, 64(%rsp)
# A[1] * B[8]
movq 64(%rcx), %rax
mulq 8(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * B[7]
movq 56(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * B[6]
movq 48(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[6] * B[3]
movq 24(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[7] * B[2]
movq 16(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[8] * B[1]
movq 8(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %r11, 72(%rsp)
# A[2] * B[8]
movq 64(%rcx), %rax
mulq 16(%rsi)
xorq %r11, %r11
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[3] * B[7]
movq 56(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[4] * B[6]
movq 48(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[6] * B[4]
movq 32(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[7] * B[3]
movq 24(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[8] * B[2]
movq 16(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
movq %r12, 80(%rsp)
# A[3] * B[8]
movq 64(%rcx), %rax
mulq 24(%rsi)
xorq %r12, %r12
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * B[7]
movq 56(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * B[6]
movq 48(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * B[5]
movq 40(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * B[4]
movq 32(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * B[3]
movq 24(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
movq %r13, 88(%rsp)
# A[4] * B[8]
movq 64(%rcx), %rax
mulq 32(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[5] * B[7]
movq 56(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[6] * B[6]
movq 48(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[7] * B[5]
movq 40(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[8] * B[4]
movq 32(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %r11, 96(%rsp)
# A[5] * B[8]
movq 64(%rcx), %rax
mulq 40(%rsi)
xorq %r11, %r11
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[6] * B[7]
movq 56(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[7] * B[6]
movq 48(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
# A[8] * B[5]
movq 40(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x00, %r11
movq %r12, 104(%rsp)
# A[6] * B[8]
movq 64(%rcx), %rax
mulq 48(%rsi)
xorq %r12, %r12
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * B[7]
movq 56(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * B[6]
movq 48(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r13
adcq %rdx, %r11
adcq $0x00, %r12
movq %r13, 112(%rsp)
# A[7] * B[8]
movq 64(%rcx), %rax
mulq 56(%rsi)
xorq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[8] * B[7]
movq 56(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %r11, 120(%rsp)
# A[8] * B[8]
movq 64(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r12
adcq %rdx, %r13
movq %r12, 128(%rsp)
movq %r13, 136(%rsp)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r11
movq %rax, %r10
andq $0x1ff, %r10
movq 88(%rsp), %r12
movq 96(%rsp), %r13
movq 104(%rsp), %rcx
movq 112(%rsp), %rsi
movq 120(%rsp), %r8
movq 128(%rsp), %r9
shrdq $9, %rdx, %rax
shrdq $9, %r11, %rdx
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %rcx, %r13
shrdq $9, %rsi, %rcx
shrdq $9, %r8, %rsi
shrdq $9, %r9, %r8
shrq $9, %r9
addq (%rsp), %rax
adcq 8(%rsp), %rdx
adcq 16(%rsp), %r11
adcq 24(%rsp), %r12
adcq 32(%rsp), %r13
adcq 40(%rsp), %rcx
adcq 48(%rsp), %rsi
adcq 56(%rsp), %r8
adcq %r9, %r10
movq %r10, %r9
shrq $9, %r10
andq $0x1ff, %r9
addq %r10, %rax
adcq $0x00, %rdx
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
adcq $0x00, %rcx
adcq $0x00, %rsi
adcq $0x00, %r8
adcq $0x00, %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq %r13, 32(%rdi)
movq %rcx, 40(%rdi)
movq %rsi, 48(%rdi)
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
addq $0x90, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_mul_9,.-sp_521_mont_mul_9
#endif /* __APPLE__ */
/* Square the Montgomery form number mod the modulus (prime). (r = a * a mod m)
*
* r Result of squaring.
* a Number to square in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
#ifndef __APPLE__
.text
.globl sp_521_mont_sqr_9
.type sp_521_mont_sqr_9,@function
.align 16
sp_521_mont_sqr_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_sqr_9
.p2align 4
_sp_521_mont_sqr_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
subq $0x90, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %r11, %r10
adcq %r12, %r8
adcq %r13, %r9
movq %r10, 40(%rsp)
# A[0] * A[6]
movq 48(%rsi), %rax
mulq (%rsi)
xorq %r10, %r10
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r8
adcq %r12, %r9
adcq %r13, %r10
movq %r8, 48(%rsp)
# A[0] * A[7]
movq 56(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[1] * A[6]
movq 48(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %r11, %r9
adcq %r12, %r10
adcq %r13, %r8
movq %r9, 56(%rsp)
# A[0] * A[8]
movq 64(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[1] * A[7]
movq 56(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[2] * A[6]
movq 48(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r10
adcq %r12, %r8
adcq %r13, %r9
movq %r10, 64(%rsp)
# A[1] * A[8]
movq 64(%rsi), %rax
mulq 8(%rsi)
xorq %r10, %r10
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[2] * A[7]
movq 56(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[3] * A[6]
movq 48(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %r11, %r8
adcq %r12, %r9
adcq %r13, %r10
movq %r8, 72(%rsp)
# A[2] * A[8]
movq 64(%rsi), %rax
mulq 16(%rsi)
xorq %r8, %r8
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[3] * A[7]
movq 56(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[4] * A[6]
movq 48(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r9
adcq %r12, %r10
adcq %r13, %r8
movq %r9, 80(%rsp)
# A[3] * A[8]
movq 64(%rsi), %rax
mulq 24(%rsi)
xorq %r9, %r9
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
# A[4] * A[7]
movq 56(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
# A[5] * A[6]
movq 48(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
addq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq %r11, %r10
adcq %r12, %r8
adcq %r13, %r9
movq %r10, 88(%rsp)
# A[4] * A[8]
movq 64(%rsi), %rax
mulq 32(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * A[7]
movq 56(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * A[6]
movq 48(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 96(%rsp)
# A[5] * A[8]
movq 64(%rsi), %rax
mulq 40(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * A[7]
movq 56(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 104(%rsp)
# A[6] * A[8]
movq 64(%rsi), %rax
mulq 48(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * A[7]
movq 56(%rsi), %rax
mulq %rax
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 112(%rsp)
# A[7] * A[8]
movq 64(%rsi), %rax
mulq 56(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 120(%rsp)
# A[8] * A[8]
movq 64(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %r10
movq %r9, 128(%rsp)
movq %r10, 136(%rsp)
movq 64(%rsp), %r8
movq 72(%rsp), %r9
movq 80(%rsp), %r10
movq %r8, %rcx
andq $0x1ff, %rcx
movq 88(%rsp), %rax
movq 96(%rsp), %rdx
movq 104(%rsp), %r11
movq 112(%rsp), %r12
movq 120(%rsp), %r13
movq 128(%rsp), %rsi
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %rax, %r10
shrdq $9, %rdx, %rax
shrdq $9, %r11, %rdx
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %rsi, %r13
shrq $9, %rsi
addq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %rax
adcq 32(%rsp), %rdx
adcq 40(%rsp), %r11
adcq 48(%rsp), %r12
adcq 56(%rsp), %r13
adcq %rsi, %rcx
movq %rcx, %rsi
shrq $9, %rcx
andq $0x1ff, %rsi
addq %rcx, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %rax
adcq $0x00, %rdx
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
adcq $0x00, %rsi
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %rax, 24(%rdi)
movq %rdx, 32(%rdi)
movq %r11, 40(%rdi)
movq %r12, 48(%rdi)
movq %r13, 56(%rdi)
movq %rsi, 64(%rdi)
addq $0x90, %rsp
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_sqr_9,.-sp_521_mont_sqr_9
#endif /* __APPLE__ */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
* return -ve, 0 or +ve if a is less than, equal to or greater than b
* respectively.
*/
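/* Constant-time compare: the limbs are scanned from most significant to
 * least. %rdx holds a "still equal" mask that starts at all ones and is
 * cleared by cmovnz at the first differing limb; while it is live,
 * cmova/cmovc latch +1 or -1 into %rax. The final `xorq %rdx, %rax`
 * yields 0 when every limb matched. Data flow, with each `if` standing
 * in for a branch-free cmov (illustrative only):
 *
 *   int64_t ret = -1, mask = -1;
 *   for (int i = 8; i >= 0; i--) {
 *       uint64_t x = a[i] & mask, y = b[i] & mask;
 *       if (x > y)  ret  = 1;      // cmova
 *       if (x < y)  ret  = mask;   // cmovc (mask is still -1 here)
 *       if (x != y) mask = 0;      // cmovnz: freeze remaining limbs
 *   }
 *   return ret ^ mask;             // -1, 0 or +1
 */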
#ifndef __APPLE__
.text
.globl sp_521_cmp_9
.type sp_521_cmp_9,@function
.align 16
sp_521_cmp_9:
#else
.section __TEXT,__text
.globl _sp_521_cmp_9
.p2align 4
_sp_521_cmp_9:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_521_cmp_9,.-sp_521_cmp_9
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
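/* Constant-time conditional subtract: b is first ANDed with the mask m
 * into a stack copy (so either b or zero), then an unconditional borrow
 * chain subtracts it from a; the return value is the final borrow spread
 * to a full mask by `sbbq %rax, %rax`. Sketch (same limb assumptions as
 * the earlier sketches):
 *
 *   sp_digit t[9], borrow = 0;
 *   for (int i = 0; i < 9; i++) t[i] = b[i] & m;      // b or 0
 *   for (int i = 0; i < 9; i++) {
 *       unsigned __int128 d = (unsigned __int128)a[i] - t[i] - borrow;
 *       r[i] = (sp_digit)d;
 *       borrow = (sp_digit)(d >> 64) & 1;
 *   }
 *   return (sp_digit)0 - borrow;                      // 0 or all-ones
 */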
#ifndef __APPLE__
.text
.globl sp_521_cond_sub_9
.type sp_521_cond_sub_9,@function
.align 16
sp_521_cond_sub_9:
#else
.section __TEXT,__text
.globl _sp_521_cond_sub_9
.p2align 4
_sp_521_cond_sub_9:
#endif /* __APPLE__ */
subq $0x48, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
andq %rcx, %r8
movq %r8, 64(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq %r8, 64(%rdi)
sbbq %rax, %rax
addq $0x48, %rsp
repz retq
#ifndef __APPLE__
.size sp_521_cond_sub_9,.-sp_521_cond_sub_9
#endif /* __APPLE__ */
/* Reduce the number back to 521 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
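/* Despite the name, for p = 2^521 - 1 this is a pseudo-Mersenne
 * reduction; m and mp are never read. The 17 significant limbs of a are
 * split at bit 521 and folded using 2^521 ≡ 1 (mod p). In outline
 * (illustrative; 521 = 8*64 + 9, hence the `shrdq $9` chain):
 *
 *   lo = a mod 2^521;   // limbs 0..7 plus the low 9 bits of limb 8
 *   hi = a >> 521;      // extracted by the shrdq $9 chain
 *   r  = lo + hi;                       // r < 2^522
 *   r  = (r mod 2^521) + (r >> 521);    // fold the last bit once more
 */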
#ifndef __APPLE__
.text
.globl sp_521_mont_reduce_9
.type sp_521_mont_reduce_9,@function
.align 16
sp_521_mont_reduce_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_reduce_9
.p2align 4
_sp_521_mont_reduce_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq 64(%rdi), %rdx
movq 72(%rdi), %rax
movq 80(%rdi), %rcx
movq %rdx, %r14
andq $0x1ff, %r14
movq 88(%rdi), %r8
movq 96(%rdi), %r9
movq 104(%rdi), %r10
movq 112(%rdi), %r11
movq 120(%rdi), %r12
movq 128(%rdi), %r13
shrdq $9, %rax, %rdx
shrdq $9, %rcx, %rax
shrdq $9, %r8, %rcx
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrq $9, %r13
addq (%rdi), %rdx
adcq 8(%rdi), %rax
adcq 16(%rdi), %rcx
adcq 24(%rdi), %r8
adcq 32(%rdi), %r9
adcq 40(%rdi), %r10
adcq 48(%rdi), %r11
adcq 56(%rdi), %r12
adcq %r13, %r14
movq %r14, %r13
shrq $9, %r14
andq $0x1ff, %r13
addq %r14, %rdx
adcq $0x00, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq %r12, 56(%rdi)
movq %r13, 64(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_reduce_9,.-sp_521_mont_reduce_9
#endif /* __APPLE__ */
/* Reduce the number back to 521 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
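/* This one is a genuine word-by-word Montgomery reduction (used for the
 * curve order, which has no special form). Each iteration adds a
 * multiple of m chosen so the lowest remaining limb becomes zero; after
 * nine iterations the result is shifted down by 521 bits (the shrdq
 * block) and a conditional subtract of m canonicalizes it. A generic
 * sketch of the loop (illustrative only; the real code keeps limbs in
 * registers and merges the final shift):
 *
 *   for (int i = 0; i < 9; i++) {
 *       sp_digit mu = a[i] * mp;            // mod 2^64
 *       sp_digit carry = 0;
 *       for (int j = 0; j < 9; j++) {       // a += mu * m << (64*i)
 *           unsigned __int128 t =
 *               (unsigned __int128)mu * m[j] + a[i + j] + carry;
 *           a[i + j] = (sp_digit)t;
 *           carry = (sp_digit)(t >> 64);
 *       }
 *       a[i + 9] += carry;   // the real code also tracks this carry
 *   }
 *
 * On the last iteration mu is masked to 9 bits (`andq $0x1ff`) because
 * R = 2^521 rather than a whole number of 64-bit words.
 */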
#ifndef __APPLE__
.text
.globl sp_521_mont_reduce_order_9
.type sp_521_mont_reduce_order_9,@function
.align 16
sp_521_mont_reduce_order_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_reduce_order_9
.p2align 4
_sp_521_mont_reduce_order_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 9
movq $9, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_521_mont_reduce_order_9_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
cmpq $0x01, %r8
jne L_521_mont_reduce_order_9_nomask
andq $0x1ff, %r11
L_521_mont_reduce_order_9_nomask:
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
movq %r13, (%rdi)
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r9
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r9, %r12
movq %r12, 64(%rdi)
adcq %rdx, 72(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_521_mont_reduce_order_9_loop
movq %r13, (%rdi)
movq %r14, 8(%rdi)
movq %rdi, %rcx
subq $0x48, %rdi
subq $8, %rcx
movq (%rcx), %rax
movq 8(%rcx), %rdx
movq 16(%rcx), %r8
movq 24(%rcx), %r9
movq 32(%rcx), %r11
shrdq $9, %rdx, %rax
shrdq $9, %r8, %rdx
shrdq $9, %r9, %r8
shrdq $9, %r11, %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 40(%rcx), %rdx
movq 48(%rcx), %r8
movq 56(%rcx), %r9
movq 64(%rcx), %rax
shrdq $9, %rdx, %r11
shrdq $9, %r8, %rdx
shrdq $9, %r9, %r8
shrdq $9, %rax, %r9
movq %r11, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 72(%rcx), %rdx
shrdq $9, %rdx, %rax
shrq $9, %rdx
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq 64(%rdi), %r15
shrq $9, %r15
negq %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
#ifndef __APPLE__
callq sp_521_cond_sub_9@plt
#else
callq _sp_521_cond_sub_9
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_reduce_order_9,.-sp_521_mont_reduce_order_9
#endif /* __APPLE__ */
/* Add two Montgomery form numbers (r = (a + b) % m).
*
* r Result of addition.
* a First number to add in Montgomery form.
* b Second number to add in Montgomery form.
* m Modulus (prime).
*/
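/* After the nine-limb add, anything at or above bit 521 is folded back
 * into the bottom using 2^521 ≡ 1 (mod p):
 *
 *   t = a + b;
 *   r = (t mod 2^521) + (t >> 521);
 */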
#ifndef __APPLE__
.text
.globl sp_521_mont_add_9
.type sp_521_mont_add_9,@function
.align 16
sp_521_mont_add_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_add_9
.p2align 4
_sp_521_mont_add_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
movq 48(%rsi), %r12
movq 56(%rsi), %r13
movq 64(%rsi), %r14
addq (%rdx), %rax
adcq 8(%rdx), %rcx
adcq 16(%rdx), %r8
adcq 24(%rdx), %r9
adcq 32(%rdx), %r10
adcq 40(%rdx), %r11
adcq 48(%rdx), %r12
adcq 56(%rdx), %r13
adcq 64(%rdx), %r14
movq %r14, %r15
andq $0x1ff, %r14
shrq $9, %r15
addq %r15, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
adcq $0x00, %r14
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
movq %r12, 48(%rdi)
movq %r13, 56(%rdi)
movq %r14, 64(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_add_9,.-sp_521_mont_add_9
#endif /* __APPLE__ */
/* Double a Montgomery form number (r = (a + a) % m).
 *
 * r Result of doubling.
 * a Number to double in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_521_mont_dbl_9
.type sp_521_mont_dbl_9,@function
.align 16
sp_521_mont_dbl_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_dbl_9
.p2align 4
_sp_521_mont_dbl_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
movq 48(%rsi), %r11
movq 56(%rsi), %r12
movq 64(%rsi), %r13
addq %rdx, %rdx
adcq %rax, %rax
adcq %rcx, %rcx
adcq %r8, %r8
adcq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
movq %r13, %r14
andq $0x1ff, %r13
shrq $9, %r14
addq %r14, %rdx
adcq $0x00, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq %r12, 56(%rdi)
movq %r13, 64(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_dbl_9,.-sp_521_mont_dbl_9
#endif /* __APPLE__ */
/* Triple a Montgomery form number (r = (a + a + a) % m).
 *
 * r Result of tripling.
* a Number to triple in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_521_mont_tpl_9
.type sp_521_mont_tpl_9,@function
.align 16
sp_521_mont_tpl_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_tpl_9
.p2align 4
_sp_521_mont_tpl_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
movq 48(%rsi), %r11
movq 56(%rsi), %r12
movq 64(%rsi), %r13
addq %rdx, %rdx
adcq %rax, %rax
adcq %rcx, %rcx
adcq %r8, %r8
adcq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
addq (%rsi), %rdx
adcq 8(%rsi), %rax
adcq 16(%rsi), %rcx
adcq 24(%rsi), %r8
adcq 32(%rsi), %r9
adcq 40(%rsi), %r10
adcq 48(%rsi), %r11
adcq 56(%rsi), %r12
adcq 64(%rsi), %r13
movq %r13, %r14
andq $0x1ff, %r13
shrq $9, %r14
addq %r14, %rdx
adcq $0x00, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %r13
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq %r12, 56(%rdi)
movq %r13, 64(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_tpl_9,.-sp_521_mont_tpl_9
#endif /* __APPLE__ */
/* Subtract two Montgomery form numbers (r = (a - b) % m).
 *
 * r Result of subtraction.
 * a Number to subtract from in Montgomery form.
 * b Number to subtract in Montgomery form.
* m Modulus (prime).
*/
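/* When the subtract borrows, the 576-bit two's complement result masked
 * to 521 bits equals a - b + 2^521; since p = 2^521 - 1, subtracting 1
 * (the 0-or-1 value produced by `sarq $9` then `negq`) turns that into
 * a - b + p:
 *
 *   r = ((a - b) mod 2^521) - borrow;   // borrow is 0 or 1
 */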
#ifndef __APPLE__
.text
.globl sp_521_mont_sub_9
.type sp_521_mont_sub_9,@function
.align 16
sp_521_mont_sub_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_sub_9
.p2align 4
_sp_521_mont_sub_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
movq 40(%rsi), %r11
movq 48(%rsi), %r12
movq 56(%rsi), %r13
movq 64(%rsi), %r14
subq (%rdx), %rax
sbbq 8(%rdx), %rcx
sbbq 16(%rdx), %r8
sbbq 24(%rdx), %r9
sbbq 32(%rdx), %r10
sbbq 40(%rdx), %r11
sbbq 48(%rdx), %r12
sbbq 56(%rdx), %r13
sbbq 64(%rdx), %r14
movq %r14, %r15
andq $0x1ff, %r14
sarq $9, %r15
negq %r15
subq %r15, %rax
sbbq $0x00, %rcx
sbbq $0x00, %r8
sbbq $0x00, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
sbbq $0x00, %r14
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
movq %r12, 48(%rdi)
movq %r13, 56(%rdi)
movq %r14, 64(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_sub_9,.-sp_521_mont_sub_9
#endif /* __APPLE__ */
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
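/* Halving mod p: if a is odd, p (= 2^521 - 1) is added first so the
 * value becomes even. The code realizes a + p as (a - 1) + 2^521: the
 * `subq`/`sbbq` chain subtracts the low bit, and `shlq $9` plants 2^9 in
 * limb 8, i.e. bit 521. The shrdq chain then shifts the nine limbs right
 * by one:
 *
 *   odd = a & 1;
 *   r = (a - odd + odd * 2^521) >> 1;   // == (a + odd * p) / 2
 */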
#ifndef __APPLE__
.text
.globl sp_521_mont_div2_9
.type sp_521_mont_div2_9,@function
.align 16
sp_521_mont_div2_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_div2_9
.p2align 4
_sp_521_mont_div2_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
movq 48(%rsi), %r11
movq 56(%rsi), %r12
movq 64(%rsi), %r13
movq %rdx, %r14
andq $0x01, %r14
subq %r14, %rdx
sbbq $0x00, %rax
sbbq $0x00, %rcx
sbbq $0x00, %r8
sbbq $0x00, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
shlq $9, %r14
addq %r14, %r13
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrdq $0x01, %r11, %r10
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrq $0x01, %r13
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq %r12, 56(%rdi)
movq %r13, 64(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_div2_9,.-sp_521_mont_div2_9
#endif /* __APPLE__ */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible point that could be the one being copied.
*
* r Point to copy into.
 * table Start of the table entries to access.
* idx Index of point to retrieve.
*/
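/* Cache-resistant table lookup: all 32 stored multiples are read (the
 * initial `addq $0x1b8, %rsi` skips entry 0 of the 33-entry table), each
 * is ANDed with an all-ones/zero mask built by comparing the running
 * index against idx, and the results are ORed together, so the memory
 * access pattern is independent of idx. pcmpeqd builds the mask for the
 * SSE2 lanes and cmpq/sete/negq builds it for the stray 64-bit words.
 * Scalar sketch (illustrative; ENTRY_WORDS is a hypothetical count of
 * the words touched per entry):
 *
 *   for (int i = 1; i <= 32; i++) {
 *       sp_digit mask = (sp_digit)0 - (sp_digit)(i == idx);
 *       for (int j = 0; j < ENTRY_WORDS; j++)
 *           out[j] |= table[i][j] & mask;
 *   }
 */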
#ifndef __APPLE__
.text
.globl sp_521_get_point_33_9
.type sp_521_get_point_33_9,@function
.align 16
sp_521_get_point_33_9:
#else
.section __TEXT,__text
.globl _sp_521_get_point_33_9
.p2align 4
_sp_521_get_point_33_9:
#endif /* __APPLE__ */
pushq %r12
movq $0x01, %r12
movq $0x01, %rax
movd %edx, %xmm13
addq $0x1b8, %rsi
movd %eax, %xmm15
movq $32, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
xorq %r10, %r10
xorq %r11, %r11
movdqa %xmm15, %xmm14
L_521_get_point_33_9_start_1:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r12, %rdx
sete %cl
negq %rcx
incq %r12
movdqu (%rsi), %xmm6
movdqu 16(%rsi), %xmm7
movdqu 32(%rsi), %xmm8
movdqu 48(%rsi), %xmm9
movq 64(%rsi), %r8
movdqu 144(%rsi), %xmm10
movdqu 160(%rsi), %xmm11
addq $0x1b8, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
andq %rcx, %r8
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
orq %r8, %r10
decq %rax
jnz L_521_get_point_33_9_start_1
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 48(%rdi)
movq %r10, 64(%rdi)
movdqu %xmm4, 144(%rdi)
movdqu %xmm5, 160(%rdi)
movq $0x01, %r12
movq $0x01, %rax
movd %edx, %xmm13
subq $0x3700, %rsi
movd %eax, %xmm15
movq $32, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
xorq %r10, %r10
xorq %r11, %r11
movdqa %xmm15, %xmm14
L_521_get_point_33_9_start_2:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r12, %rdx
sete %cl
negq %rcx
incq %r12
movdqu 176(%rsi), %xmm6
movdqu 192(%rsi), %xmm7
movq 208(%rsi), %r8
movdqu 288(%rsi), %xmm8
movdqu 304(%rsi), %xmm9
movdqu 320(%rsi), %xmm10
movdqu 336(%rsi), %xmm11
movq 352(%rsi), %r9
addq $0x1b8, %rsi
pand %xmm12, %xmm6
pand %xmm12, %xmm7
pand %xmm12, %xmm8
pand %xmm12, %xmm9
pand %xmm12, %xmm10
pand %xmm12, %xmm11
andq %rcx, %r8
andq %rcx, %r9
por %xmm6, %xmm0
por %xmm7, %xmm1
por %xmm8, %xmm2
por %xmm9, %xmm3
por %xmm10, %xmm4
por %xmm11, %xmm5
orq %r8, %r10
orq %r9, %r11
decq %rax
jnz L_521_get_point_33_9_start_2
movdqu %xmm0, 176(%rdi)
movdqu %xmm1, 192(%rdi)
movq %r10, 208(%rdi)
movdqu %xmm2, 288(%rdi)
movdqu %xmm3, 304(%rdi)
movdqu %xmm4, 320(%rdi)
movdqu %xmm5, 336(%rdi)
movq %r11, 352(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_get_point_33_9,.-sp_521_get_point_33_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible point that could be the one being copied.
*
* r Point to copy into.
 * table Start of the table entries to access.
* idx Index of point to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_521_get_point_33_avx2_9
.type sp_521_get_point_33_avx2_9,@function
.align 16
sp_521_get_point_33_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_get_point_33_avx2_9
.p2align 4
_sp_521_get_point_33_avx2_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq $0x01, %r14
movq $0x01, %rax
movd %edx, %xmm13
addq $0x1b8, %rsi
movd %eax, %xmm15
movq $32, %rax
vpxor %ymm14, %ymm14, %ymm14
vpermd %ymm13, %ymm14, %ymm13
vpermd %ymm15, %ymm14, %ymm15
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %ymm3, %ymm3, %ymm3
vpxor %ymm4, %ymm4, %ymm4
vpxor %ymm5, %ymm5, %ymm5
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
vmovdqa %ymm15, %ymm14
L_521_get_point_33_avx2_9_start:
vpcmpeqd %ymm13, %ymm14, %ymm12
vpaddd %ymm15, %ymm14, %ymm14
xorq %rcx, %rcx
cmpq %r14, %rdx
sete %cl
negq %rcx
incq %r14
vmovupd (%rsi), %ymm6
vmovupd 32(%rsi), %ymm7
vmovupd 144(%rsi), %ymm8
vmovupd 176(%rsi), %ymm9
vmovupd 288(%rsi), %ymm10
vmovupd 320(%rsi), %ymm11
movq 64(%rsi), %r11
movq 208(%rsi), %r12
movq 352(%rsi), %r13
addq $0x1b8, %rsi
vpand %ymm12, %ymm6, %ymm6
vpand %ymm12, %ymm7, %ymm7
vpand %ymm12, %ymm8, %ymm8
vpand %ymm12, %ymm9, %ymm9
vpand %ymm12, %ymm10, %ymm10
vpand %ymm12, %ymm11, %ymm11
andq %rcx, %r11
andq %rcx, %r12
andq %rcx, %r13
vpor %ymm6, %ymm0, %ymm0
vpor %ymm7, %ymm1, %ymm1
vpor %ymm8, %ymm2, %ymm2
vpor %ymm9, %ymm3, %ymm3
vpor %ymm10, %ymm4, %ymm4
vpor %ymm11, %ymm5, %ymm5
orq %r11, %r8
orq %r12, %r9
orq %r13, %r10
decq %rax
jnz L_521_get_point_33_avx2_9_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 32(%rdi)
vmovupd %ymm2, 144(%rdi)
vmovupd %ymm3, 176(%rdi)
vmovupd %ymm4, 288(%rdi)
vmovupd %ymm5, 320(%rdi)
movq %r8, 64(%rdi)
movq %r9, 208(%rdi)
movq %r10, 352(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_get_point_33_avx2_9,.-sp_521_get_point_33_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifdef HAVE_INTEL_AVX2
/* Multiply two Montgomery form numbers mod the modulus (prime).
* (r = a * b mod m)
*
* r Result of multiplication.
* a First number to multiply in Montgomery form.
* b Second number to multiply in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
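/* Note: for the P-521 prime p = 2^521 - 1 the reduction below degenerates
 * into a simple fold, since 2^521 == 1 (mod p).  Hedged pseudo-C on an
 * imaginary wide-integer type:
 *
 *   t = a * b;                     // 1042-bit product
 *   t = (t & p) + (t >> 521);     // first fold
 *   t = (t & p) + (t >> 521);     // second fold absorbs the carry
 *   r = t;                         // r fits in 521 bits
 *
 * The shrdq-by-9 sequence at the end of the function implements t >> 521
 * (eight whole digits plus 9 bits).
 */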
#ifndef __APPLE__
.text
.globl sp_521_mont_mul_avx2_9
.type sp_521_mont_mul_avx2_9,@function
.align 16
sp_521_mont_mul_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_mul_avx2_9
.p2align 4
_sp_521_mont_mul_avx2_9:
#endif /* __APPLE__ */
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
movq %rdx, %rbp
subq $0x90, %rsp
movq %rsp, %rbx
addq $0x48, %rsp
xorq %r13, %r13
movq (%rsi), %rdx
# A[0] * B[0]
mulx (%rbp), %r8, %r9
# A[0] * B[1]
mulx 8(%rbp), %rax, %r10
movq %r8, (%rbx)
adcxq %rax, %r9
# A[0] * B[2]
mulx 16(%rbp), %rax, %r11
movq %r9, 8(%rbx)
adcxq %rax, %r10
movq %r10, 16(%rbx)
# A[0] * B[3]
mulx 24(%rbp), %rax, %r8
adcxq %rax, %r11
# A[0] * B[4]
mulx 32(%rbp), %rax, %r9
movq %r11, 24(%rbx)
adcxq %rax, %r8
# A[0] * B[5]
mulx 40(%rbp), %rax, %r10
movq %r8, 32(%rbx)
adcxq %rax, %r9
movq %r9, 40(%rbx)
# A[0] * B[6]
mulx 48(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[7]
mulx 56(%rbp), %rax, %r8
movq %r10, 48(%rbx)
adcxq %rax, %r11
# A[0] * B[8]
mulx 64(%rbp), %rax, %r9
movq %r11, 56(%rbx)
adcxq %rax, %r8
adcxq %r13, %r9
movq %r13, %r12
adcxq %r13, %r12
movq %r8, 64(%rbx)
movq %r9, (%rsp)
movq 8(%rsi), %rdx
movq 8(%rbx), %r9
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r8
# A[1] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 8(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 24(%rbx)
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
# A[1] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rbx)
movq 64(%rbx), %r8
movq (%rsp), %r9
# A[1] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[1] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
movq %r13, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r12, %r10
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq 16(%rsi), %rdx
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r8
movq 40(%rbx), %r9
# A[2] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[2] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rbx)
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
# A[2] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 56(%rbx)
movq (%rsp), %r9
movq 8(%rsp), %r10
# A[2] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r9, (%rsp)
movq %r13, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r12, %r11
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r10, 8(%rsp)
movq %r11, 16(%rsp)
movq 24(%rsi), %rdx
movq 24(%rbx), %r11
movq 32(%rbx), %r8
movq 40(%rbx), %r9
movq 48(%rbx), %r10
# A[3] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[3] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rbx)
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rsp), %r9
# A[3] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[3] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 64(%rbx)
movq 8(%rsp), %r10
movq 16(%rsp), %r11
# A[3] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, (%rsp)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r10, 8(%rsp)
movq %r13, %r8
adcxq %rax, %r11
adoxq %rcx, %r8
adcxq %r12, %r8
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r11, 16(%rsp)
movq %r8, 24(%rsp)
movq 32(%rsi), %rdx
movq 32(%rbx), %r8
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
# A[4] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 32(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rbx)
movq 64(%rbx), %r8
movq (%rsp), %r9
movq 8(%rsp), %r10
# A[4] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[4] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rsp)
movq 16(%rsp), %r11
movq 24(%rsp), %r8
# A[4] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 8(%rsp)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[4] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r11, 16(%rsp)
movq %r13, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r12, %r9
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r8, 24(%rsp)
movq %r9, 32(%rsp)
movq 40(%rsi), %rdx
movq 40(%rbx), %r9
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
# A[5] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 40(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 56(%rbx)
movq (%rsp), %r9
movq 8(%rsp), %r10
movq 16(%rsp), %r11
# A[5] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, (%rsp)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rsp)
movq 24(%rsp), %r8
movq 32(%rsp), %r9
# A[5] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[5] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 16(%rsp)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r8, 24(%rsp)
movq %r13, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r12, %r10
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq 48(%rsi), %rdx
movq 48(%rbx), %r10
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rsp), %r9
# A[6] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 48(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[6] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 64(%rbx)
movq 8(%rsp), %r10
movq 16(%rsp), %r11
movq 24(%rsp), %r8
# A[6] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r9, (%rsp)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 8(%rsp)
adcxq %rax, %r11
adoxq %rcx, %r8
movq %r11, 16(%rsp)
movq 32(%rsp), %r9
movq 40(%rsp), %r10
# A[6] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 24(%rsp)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r9, 32(%rsp)
movq %r13, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r12, %r11
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r10, 40(%rsp)
movq %r11, 48(%rsp)
movq 56(%rsi), %rdx
movq 56(%rbx), %r11
movq 64(%rbx), %r8
movq (%rsp), %r9
movq 8(%rsp), %r10
# A[7] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[7] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 56(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rsp)
movq 16(%rsp), %r11
movq 24(%rsp), %r8
movq 32(%rsp), %r9
# A[7] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r10, 8(%rsp)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[7] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 16(%rsp)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rsp)
movq 40(%rsp), %r10
movq 48(%rsp), %r11
# A[7] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 32(%rsp)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r10, 40(%rsp)
movq %r13, %r8
adcxq %rax, %r11
adoxq %rcx, %r8
adcxq %r12, %r8
movq %r13, %r12
adoxq %r13, %r12
adcxq %r13, %r12
movq %r11, 48(%rsp)
movq %r8, 56(%rsp)
movq 64(%rsi), %rdx
movq 64(%rbx), %r8
movq (%rsp), %r9
movq 8(%rsp), %r10
movq 16(%rsp), %r11
# A[8] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 64(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, (%rsp)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rsp)
movq 24(%rsp), %r8
movq 32(%rsp), %r9
movq 40(%rsp), %r10
# A[8] * B[3]
mulx 24(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r8
# A[8] * B[4]
mulx 32(%rbp), %rax, %rcx
movq %r11, 16(%rsp)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 24(%rsp)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 32(%rsp)
movq 48(%rsp), %r11
movq 56(%rsp), %r8
# A[8] * B[6]
mulx 48(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 40(%rsp)
adcxq %rax, %r11
adoxq %rcx, %r8
# A[8] * B[8]
mulx 64(%rbp), %rax, %rcx
movq %r11, 48(%rsp)
movq %r13, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r12, %r9
movq %r8, 56(%rsp)
movq %r9, 64(%rsp)
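# Fold the 1042-bit product back to 521 bits: r = (t mod 2^521) + (t >> 521)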
movq -8(%rsp), %rax
movq (%rsp), %rcx
movq 8(%rsp), %r8
movq %rax, %r13
andq $0x1ff, %r13
movq 16(%rsp), %r9
movq 24(%rsp), %r10
movq 32(%rsp), %r11
movq 40(%rsp), %r12
movq 48(%rsp), %rbx
movq 56(%rsp), %rdx
subq $0x48, %rsp
shrdq $9, %rcx, %rax
shrdq $9, %r8, %rcx
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %rbx, %r12
shrdq $9, %rdx, %rbx
shrq $9, %rdx
addq (%rsp), %rax
adcq 8(%rsp), %rcx
adcq 16(%rsp), %r8
adcq 24(%rsp), %r9
adcq 32(%rsp), %r10
adcq 40(%rsp), %r11
adcq 48(%rsp), %r12
adcq 56(%rsp), %rbx
adcq %rdx, %r13
movq %r13, %rdx
shrq $9, %r13
andq $0x1ff, %rdx
addq %r13, %rax
adcq $0x00, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
adcq $0x00, %r10
adcq $0x00, %r11
adcq $0x00, %r12
adcq $0x00, %rbx
adcq $0x00, %rdx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq %r11, 40(%rdi)
movq %r12, 48(%rdi)
movq %rbx, 56(%rdi)
movq %rdx, 64(%rdi)
addq $0x90, %rsp
popq %r13
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_521_mont_mul_avx2_9,.-sp_521_mont_mul_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Square the Montgomery form number mod the modulus (prime). (r = a * a mod m)
*
* r Result of squaring.
* a Number to square in Montgomery form.
* m Modulus (prime).
* mp Montgomery multiplier.
*/
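/* Squaring halves the multiply work: each cross product a[i]*a[j] (i < j)
 * is computed once in the "Diagonal" passes below, everything is doubled,
 * and the a[i]^2 terms are added on the diagonal.  Hedged pseudo-C
 * (dbl_digit is an illustrative 128-bit type, t an imaginary wide
 * accumulator):
 *
 *   for (i = 0; i < 9; i++)
 *       for (j = i + 1; j < 9; j++)
 *           t += ((dbl_digit)a[i] * a[j]) << (64 * (i + j));
 *   t = 2 * t;
 *   for (i = 0; i < 9; i++)
 *       t += ((dbl_digit)a[i] * a[i]) << (64 * 2 * i);
 *   // then the same 2^521 - 1 fold as in sp_521_mont_mul_avx2_9
 */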
#ifndef __APPLE__
.text
.globl sp_521_mont_sqr_avx2_9
.type sp_521_mont_sqr_avx2_9,@function
.align 16
sp_521_mont_sqr_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_sqr_avx2_9
.p2align 4
_sp_521_mont_sqr_avx2_9:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $0x90, %rsp
movq %rsp, %rbp
addq $0x48, %rsp
xorq %r10, %r10
# Diagonal 1
# Zero into %r9
# A[1] x A[0]
movq (%rsi), %rdx
mulxq 8(%rsi), %r8, %r9
movq %r8, 8(%rbp)
# Zero into %r8
# A[2] x A[0]
mulxq 16(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rbp)
# No load %r12 - %r9
# A[3] x A[0]
mulxq 24(%rsi), %rax, %r12
adcxq %rax, %r8
adoxq %r10, %r12
movq %r8, 24(%rbp)
# No load %r13 - %r8
# A[4] x A[0]
mulxq 32(%rsi), %rax, %r13
adcxq %rax, %r12
adoxq %r10, %r13
# No store %r12 - %r9
# No load %r14 - %r9
# A[5] x A[0]
mulxq 40(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %r10, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[6] x A[0]
mulxq 48(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %r10, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[7] x A[0]
mulxq 56(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %r10, %rbx
# No store %r15 - %r8
# Zero into %r8
# A[8] x A[0]
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %rbx
adoxq %r10, %r8
# No store %rbx - %r9
# Zero into %r9
# A[8] x A[1]
movq 8(%rsi), %rdx
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, (%rsp)
# Carry
adcxq %r10, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 8(%rsp)
# Diagonal 2
movq 24(%rbp), %r9
# No load %r12 - %r8
# A[2] x A[1]
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r12
movq %r9, 24(%rbp)
# No load %r13 - %r9
# A[3] x A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r13
# No store %r12 - %r8
# No load %r14 - %r8
# A[4] x A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r9
# No load %r15 - %r9
# A[5] x A[1]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r8
# No load %rbx - %r8
# A[6] x A[1]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rsp), %r9
# A[7] x A[1]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rsp), %r8
# A[7] x A[2]
movq 16(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rsp)
# Zero into %r9
# A[7] x A[3]
movq 24(%rsi), %rdx
mulxq 56(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 8(%rsp)
# Zero into %r8
# A[7] x A[4]
movq 32(%rsi), %rdx
mulxq 56(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 16(%rsp)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 24(%rsp)
# Diagonal 3
# No load %r14 - %r9
# A[3] x A[2]
movq 16(%rsi), %rdx
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
# No store %r13 - %r8
# No load %r15 - %r8
# A[4] x A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# No store %r14 - %r9
# No load %rbx - %r9
# A[5] x A[2]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r8
movq (%rsp), %r8
# A[6] x A[2]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# No store %rbx - %r9
movq 8(%rsp), %r9
# A[6] x A[3]
movq 24(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, (%rsp)
movq 16(%rsp), %r8
# A[6] x A[4]
movq 32(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 8(%rsp)
movq 24(%rsp), %r9
# A[6] x A[5]
movq 40(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 16(%rsp)
# Zero into %r8
# A[8] x A[4]
movq 32(%rsi), %rdx
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 24(%rsp)
# Zero into %r9
# A[8] x A[5]
movq 40(%rsi), %rdx
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 32(%rsp)
# Carry
adcxq %r11, %r9
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r9, 40(%rsp)
# Diagonal 4
# No load %rbx - %r8
# A[4] x A[3]
movq 24(%rsi), %rdx
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r15 - %r9
movq (%rsp), %r9
# A[5] x A[3]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# No store %rbx - %r8
movq 8(%rsp), %r8
# A[5] x A[4]
movq 32(%rsi), %rdx
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, (%rsp)
movq 16(%rsp), %r9
# A[8] x A[2]
movq 16(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 8(%rsp)
movq 24(%rsp), %r8
# A[8] x A[3]
movq 24(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 16(%rsp)
movq 32(%rsp), %r9
# A[7] x A[5]
movq 40(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 24(%rsp)
movq 40(%rsp), %r8
# A[7] x A[6]
movq 48(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r8
movq %r9, 32(%rsp)
# Zero into %r9
# A[8] x A[6]
mulxq 64(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r10, %r9
movq %r8, 40(%rsp)
# Zero into %r8
# A[8] x A[7]
movq 56(%rsi), %rdx
mulxq 64(%rsi), %rax, %r8
adcxq %rax, %r9
adoxq %r10, %r8
movq %r9, 48(%rsp)
# Carry
adcxq %r11, %r8
movq %r10, %r11
adcxq %r10, %r11
adoxq %r10, %r11
movq %r8, 56(%rsp)
movq %r11, 64(%rsp)
# Double and Add in A[i] x A[i]
movq 8(%rbp), %r9
# A[0] x A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rbp)
adoxq %r9, %r9
adcxq %rcx, %r9
movq %r9, 8(%rbp)
movq 16(%rbp), %r8
movq 24(%rbp), %r9
# A[1] x A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rbp)
movq %r9, 24(%rbp)
# A[2] x A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r12, %r12
adoxq %r13, %r13
adcxq %rax, %r12
adcxq %rcx, %r13
# A[3] x A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r14, %r14
adoxq %r15, %r15
adcxq %rax, %r14
adcxq %rcx, %r15
movq (%rsp), %r9
# A[4] x A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %rbx, %rbx
adoxq %r9, %r9
adcxq %rax, %rbx
adcxq %rcx, %r9
movq %r9, (%rsp)
movq 8(%rsp), %r8
movq 16(%rsp), %r9
# A[5] x A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 8(%rsp)
movq %r9, 16(%rsp)
movq 24(%rsp), %r8
movq 32(%rsp), %r9
# A[6] x A[6]
movq 48(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 24(%rsp)
movq %r9, 32(%rsp)
movq 40(%rsp), %r8
movq 48(%rsp), %r9
# A[7] x A[7]
movq 56(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 40(%rsp)
movq %r9, 48(%rsp)
movq 56(%rsp), %r8
movq 64(%rsp), %r9
# A[8] x A[8]
movq 64(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 56(%rsp)
movq %r9, 64(%rsp)
movq %r12, -40(%rsp)
movq %r13, -32(%rsp)
movq %r14, -24(%rsp)
movq %r15, -16(%rsp)
movq %rbx, -8(%rsp)
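# Fold the 1042-bit square back to 521 bits: r = (t mod 2^521) + (t >> 521)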
movq -8(%rsp), %r8
movq (%rsp), %r9
movq 8(%rsp), %r12
movq %r8, %rcx
andq $0x1ff, %rcx
movq 16(%rsp), %r13
movq 24(%rsp), %r14
movq 32(%rsp), %r15
movq 40(%rsp), %rbx
movq 48(%rsp), %rdx
movq 56(%rsp), %rax
subq $0x48, %rsp
shrdq $9, %r9, %r8
shrdq $9, %r12, %r9
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rbx, %r15
shrdq $9, %rdx, %rbx
shrdq $9, %rax, %rdx
shrq $9, %rax
addq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r12
adcq 24(%rsp), %r13
adcq 32(%rsp), %r14
adcq 40(%rsp), %r15
adcq 48(%rsp), %rbx
adcq 56(%rsp), %rdx
adcq %rax, %rcx
movq %rcx, %rax
shrq $9, %rcx
andq $0x1ff, %rax
addq %rcx, %r8
adcq $0x00, %r9
adcq $0x00, %r12
adcq $0x00, %r13
adcq $0x00, %r14
adcq $0x00, %r15
adcq $0x00, %rbx
adcq $0x00, %rdx
adcq $0x00, %rax
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, 32(%rdi)
movq %r15, 40(%rdi)
movq %rbx, 48(%rdi)
movq %rdx, 56(%rdi)
movq %rax, 64(%rdi)
addq $0x90, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_521_mont_sqr_avx2_9,.-sp_521_mont_sqr_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
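/* Hedged C sketch of the branch-free conditional subtract implemented
 * below (sp_digit is an assumed 64-bit type; pextq with an all-ones or
 * all-zero mask plays the role of the "& m"):
 *
 *   sp_digit cond_sub(sp_digit* r, const sp_digit* a, const sp_digit* b,
 *                     sp_digit m)
 *   {
 *       sp_digit borrow = 0;
 *       int i;
 *       for (i = 0; i < 9; i++) {
 *           sp_digit bi = b[i] & m;       // b[i] when m == -1, else 0
 *           sp_digit t  = a[i] - bi;
 *           sp_digit c  = (t > a[i]);     // borrow out of the subtract
 *           r[i] = t - borrow;
 *           borrow = c | (r[i] > t);      // borrow out of the carry-in
 *       }
 *       return (sp_digit)0 - borrow;      // -1 on borrow out, else 0
 *   }
 */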
#ifndef __APPLE__
.text
.globl sp_521_cond_sub_avx2_9
.type sp_521_cond_sub_avx2_9,@function
.align 16
sp_521_cond_sub_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_cond_sub_avx2_9
.p2align 4
_sp_521_cond_sub_avx2_9:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq %r10, 64(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_521_cond_sub_avx2_9,.-sp_521_cond_sub_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 521 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
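/* Hedged C sketch of the word-serial Montgomery reduction that the loop
 * below unrolls two digits at a time (sp_digit assumed 64-bit, __int128
 * standing in for the adcx/adox double-width arithmetic):
 *
 *   for (i = 0; i < 9; i++) {
 *       sp_digit mu = a[i] * mp;          // the tail masks mu with 0x1ff,
 *       sp_digit c  = 0;                  // as digit 8 holds only 9 bits
 *       for (j = 0; j < 9; j++) {
 *           unsigned __int128 t = (unsigned __int128)mu * m[j]
 *                               + a[i + j] + c;
 *           a[i + j] = (sp_digit)t;
 *           c        = (sp_digit)(t >> 64);
 *       }
 *       a[i + 9] += c;                    // carry kept in %rbp across rows
 *   }
 *   // the result then sits 521 bits up and is shifted down with shrdq
 */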
#ifndef __APPLE__
.text
.globl sp_521_mont_reduce_order_avx2_9
.type sp_521_mont_reduce_order_avx2_9,@function
.align 16
sp_521_mont_reduce_order_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_reduce_order_avx2_9
.p2align 4
_sp_521_mont_reduce_order_avx2_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 9
movq $8, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $32, %rdi
xorq %rbp, %rbp
L_521_mont_reduce_order_avx2_9_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
movq %r10, -32(%rdi)
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq (%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
adcxq %rbp, %r11
movq %rbx, %rbp
movq %r11, 40(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r11
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, -24(%rdi)
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq 8(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r15
adoxq %rcx, %r10
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 48(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 2
addq $16, %rdi
# i -= 2
subq $2, %r9
jnz L_521_mont_reduce_order_avx2_9_loop
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
andq $0x1ff, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
movq %r10, -32(%rdi)
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq (%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
adcxq %rbp, %r11
movq %rbx, %rbp
movq %r11, 40(%rdi)
adoxq %rbx, %rbp
# a += 1
addq $8, %rdi
movq %r12, -32(%rdi)
movq %r13, -24(%rdi)
movq %r14, -16(%rdi)
movq %r15, -8(%rdi)
subq $32, %rdi
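# Result sits 521 bits up: shrdq the digits at a[8..17] down by 9 bits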
leaq -8(%rdi), %r8
subq $0x48, %rdi
movq (%r8), %r10
movq 8(%r8), %r12
movq 16(%r8), %r13
movq 24(%r8), %r14
movq 32(%r8), %r11
shrdq $9, %r12, %r10
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r11, %r14
movq %r10, (%rdi)
movq %r12, 8(%rdi)
movq %r13, 16(%rdi)
movq %r14, 24(%rdi)
movq 40(%r8), %r12
movq 48(%r8), %r13
movq 56(%r8), %r14
movq 64(%r8), %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r10, %r14
movq %r11, 32(%rdi)
movq %r12, 40(%rdi)
movq %r13, 48(%rdi)
movq %r14, 56(%rdi)
movq 72(%r8), %r12
shrdq $9, %r12, %r10
shrq $9, %r12
movq %r10, 64(%rdi)
movq %r12, 72(%rdi)
movq 64(%rdi), %rbp
shrq $9, %rbp
negq %rbp
movq (%rsi), %rcx
movq (%rdi), %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq 8(%rdi), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq 16(%rdi), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq 24(%rdi), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%rdi), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%rdi), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%rdi), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%rdi), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%rdi), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq %rcx, 64(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_reduce_order_avx2_9,.-sp_521_mont_reduce_order_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
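/* Hedged sketch: halving mod p = 2^521 - 1 adds p first when a is odd so
 * the shift is exact, done branch-free below (pseudo-C on a wide type):
 *
 *   odd = a & 1;
 *   t   = a - odd + (odd << 521);   // == a + odd * p, always even
 *   r   = t >> 1;
 *
 * The shlq $9 into digit 8 is the "odd << 521" term.
 */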
#ifndef __APPLE__
.text
.globl sp_521_mont_div2_avx2_9
.type sp_521_mont_div2_avx2_9,@function
.align 16
sp_521_mont_div2_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mont_div2_avx2_9
.p2align 4
_sp_521_mont_div2_avx2_9:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r9
movq 40(%rsi), %r10
movq 48(%rsi), %r11
movq 56(%rsi), %r12
movq 64(%rsi), %r13
movq %rdx, %r14
andq $0x01, %r14
subq %r14, %rdx
sbbq $0x00, %rax
sbbq $0x00, %rcx
sbbq $0x00, %r8
sbbq $0x00, %r9
sbbq $0x00, %r10
sbbq $0x00, %r11
sbbq $0x00, %r12
sbbq $0x00, %r13
shlq $9, %r14
addq %r14, %r13
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
shrdq $0x01, %r11, %r10
shrdq $0x01, %r12, %r11
shrdq $0x01, %r13, %r12
shrq $0x01, %r13
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
movq %r11, 48(%rdi)
movq %r12, 56(%rdi)
movq %r13, 64(%rdi)
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_mont_div2_avx2_9,.-sp_521_mont_div2_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible entry that could be copied so that the memory
 * access pattern does not depend on the index (cache-attack resistant).
 *
 * r Entry to copy into.
 * table Start of the table entries to access.
 * idx Index of the entry to retrieve.
*/
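/* Unlike the point tables above, these entries are affine x/y pairs with a
 * 0x90-byte stride; the same constant-time mask-and-or scan is run once
 * for the x half and once for the y half.  Entry 0 is never fetched, so
 * the scan starts at entry 1 and makes 63 passes.
 */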
#ifndef __APPLE__
.text
.globl sp_521_get_entry_64_9
.type sp_521_get_entry_64_9,@function
.align 16
sp_521_get_entry_64_9:
#else
.section __TEXT,__text
.globl _sp_521_get_entry_64_9
.p2align 4
_sp_521_get_entry_64_9:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %r10
movq $0x01, %rax
movd %edx, %xmm13
addq $0x90, %rsi
movd %eax, %xmm15
movq $63, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
xorq %r9, %r9
movdqa %xmm15, %xmm14
L_521_get_entry_64_9_start_0:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r10, %rdx
sete %cl
negq %rcx
incq %r10
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
movq 64(%rsi), %r8
addq $0x90, %rsi
pand %xmm12, %xmm4
pand %xmm12, %xmm5
pand %xmm12, %xmm6
pand %xmm12, %xmm7
andq %rcx, %r8
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
orq %r8, %r9
decq %rax
jnz L_521_get_entry_64_9_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 48(%rdi)
movq %r9, 64(%rdi)
# From entry 1
movq $0x01, %r10
movq $0x01, %rax
movd %edx, %xmm13
subq $0x2328, %rsi
movd %eax, %xmm15
movq $63, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
xorq %r9, %r9
movdqa %xmm15, %xmm14
L_521_get_entry_64_9_start_1:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r10, %rdx
sete %cl
negq %rcx
incq %r10
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
movq 64(%rsi), %r8
addq $0x90, %rsi
pand %xmm12, %xmm4
pand %xmm12, %xmm5
pand %xmm12, %xmm6
pand %xmm12, %xmm7
andq %rcx, %r8
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
orq %r8, %r9
decq %rax
jnz L_521_get_entry_64_9_start_1
movdqu %xmm0, 144(%rdi)
movdqu %xmm1, 160(%rdi)
movdqu %xmm2, 176(%rdi)
movdqu %xmm3, 192(%rdi)
movq %r9, 208(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_get_entry_64_9,.-sp_521_get_entry_64_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible entry that could be copied so that the memory
 * access pattern does not depend on the index (cache-attack resistant).
 *
 * r Entry to copy into.
 * table Start of the table entries to access.
 * idx Index of the entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_521_get_entry_64_avx2_9
.type sp_521_get_entry_64_avx2_9,@function
.align 16
sp_521_get_entry_64_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_get_entry_64_avx2_9
.p2align 4
_sp_521_get_entry_64_avx2_9:
#endif /* __APPLE__ */
pushq %r12
movq $0x01, %r12
movq $0x01, %rax
movd %edx, %xmm9
addq $0x90, %rsi
movd %eax, %xmm11
movq $63, %rax
vpxor %ymm10, %ymm10, %ymm10
vpermd %ymm9, %ymm10, %ymm9
vpermd %ymm11, %ymm10, %ymm11
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %ymm3, %ymm3, %ymm3
xorq %r8, %r8
xorq %r9, %r9
vmovdqa %ymm11, %ymm10
L_521_get_entry_64_avx2_9_start:
vpcmpeqd %ymm9, %ymm10, %ymm8
vpaddd %ymm11, %ymm10, %ymm10
xorq %rcx, %rcx
cmpq %r12, %rdx
sete %cl
negq %rcx
incq %r12
vmovupd (%rsi), %ymm4
vmovupd 32(%rsi), %ymm5
vmovupd 72(%rsi), %ymm6
vmovupd 104(%rsi), %ymm7
movq 64(%rsi), %r10
movq 136(%rsi), %r11
addq $0x90, %rsi
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpand %ymm8, %ymm6, %ymm6
vpand %ymm8, %ymm7, %ymm7
andq %rcx, %r10
andq %rcx, %r11
vpor %ymm4, %ymm0, %ymm0
vpor %ymm5, %ymm1, %ymm1
vpor %ymm6, %ymm2, %ymm2
vpor %ymm7, %ymm3, %ymm3
orq %r10, %r8
orq %r11, %r9
decq %rax
jnz L_521_get_entry_64_avx2_9_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 32(%rdi)
vmovupd %ymm2, 144(%rdi)
vmovupd %ymm3, 176(%rdi)
movq %r8, 64(%rdi)
movq %r9, 208(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_get_entry_64_avx2_9,.-sp_521_get_entry_64_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
#ifndef WC_NO_CACHE_RESISTANT
/* Touch each possible entry that could be copied so that the memory
 * access pattern does not depend on the index (cache-attack resistant).
 *
 * r Entry to copy into.
 * table Start of the table entries to access.
 * idx Index of the entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_521_get_entry_65_9
.type sp_521_get_entry_65_9,@function
.align 16
sp_521_get_entry_65_9:
#else
.section __TEXT,__text
.globl _sp_521_get_entry_65_9
.p2align 4
_sp_521_get_entry_65_9:
#endif /* __APPLE__ */
# From entry 1
movq $0x01, %r10
movq $0x01, %rax
movd %edx, %xmm13
addq $0x90, %rsi
movd %eax, %xmm15
movq $0x40, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
xorq %r9, %r9
movdqa %xmm15, %xmm14
L_521_get_entry_65_9_start_0:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r10, %rdx
sete %cl
negq %rcx
incq %r10
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
movq 64(%rsi), %r8
addq $0x90, %rsi
pand %xmm12, %xmm4
pand %xmm12, %xmm5
pand %xmm12, %xmm6
pand %xmm12, %xmm7
andq %rcx, %r8
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
orq %r8, %r9
decq %rax
jnz L_521_get_entry_65_9_start_0
movdqu %xmm0, (%rdi)
movdqu %xmm1, 16(%rdi)
movdqu %xmm2, 32(%rdi)
movdqu %xmm3, 48(%rdi)
movq %r9, 64(%rdi)
# From entry 1
movq $0x01, %r10
movq $0x01, %rax
movd %edx, %xmm13
subq $0x23b8, %rsi
movd %eax, %xmm15
movq $0x40, %rax
pshufd $0x00, %xmm15, %xmm15
pshufd $0x00, %xmm13, %xmm13
pxor %xmm14, %xmm14
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
xorq %r9, %r9
movdqa %xmm15, %xmm14
L_521_get_entry_65_9_start_1:
movdqa %xmm14, %xmm12
paddd %xmm15, %xmm14
pcmpeqd %xmm13, %xmm12
xorq %rcx, %rcx
cmpq %r10, %rdx
sete %cl
negq %rcx
incq %r10
movdqu (%rsi), %xmm4
movdqu 16(%rsi), %xmm5
movdqu 32(%rsi), %xmm6
movdqu 48(%rsi), %xmm7
movq 64(%rsi), %r8
addq $0x90, %rsi
pand %xmm12, %xmm4
pand %xmm12, %xmm5
pand %xmm12, %xmm6
pand %xmm12, %xmm7
andq %rcx, %r8
por %xmm4, %xmm0
por %xmm5, %xmm1
por %xmm6, %xmm2
por %xmm7, %xmm3
orq %r8, %r9
decq %rax
jnz L_521_get_entry_65_9_start_1
movdqu %xmm0, 144(%rdi)
movdqu %xmm1, 160(%rdi)
movdqu %xmm2, 176(%rdi)
movdqu %xmm3, 192(%rdi)
movq %r9, 208(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_get_entry_65_9,.-sp_521_get_entry_65_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Touch each possible entry that could be copied so that the memory
 * access pattern does not depend on the index (cache-attack resistant).
 *
 * r Entry to copy into.
 * table Start of the table entries to access.
 * idx Index of the entry to retrieve.
*/
#ifndef __APPLE__
.text
.globl sp_521_get_entry_65_avx2_9
.type sp_521_get_entry_65_avx2_9,@function
.align 16
sp_521_get_entry_65_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_get_entry_65_avx2_9
.p2align 4
_sp_521_get_entry_65_avx2_9:
#endif /* __APPLE__ */
pushq %r12
movq $0x01, %r12
movq $0x01, %rax
movd %edx, %xmm9
addq $0x90, %rsi
movd %eax, %xmm11
movq $0x40, %rax
vpxor %ymm10, %ymm10, %ymm10
vpermd %ymm9, %ymm10, %ymm9
vpermd %ymm11, %ymm10, %ymm11
vpxor %ymm0, %ymm0, %ymm0
vpxor %ymm1, %ymm1, %ymm1
vpxor %ymm2, %ymm2, %ymm2
vpxor %ymm3, %ymm3, %ymm3
xorq %r8, %r8
xorq %r9, %r9
vmovdqa %ymm11, %ymm10
L_521_get_entry_65_avx2_9_start:
vpcmpeqd %ymm9, %ymm10, %ymm8
vpaddd %ymm11, %ymm10, %ymm10
xorq %rcx, %rcx
cmpq %r12, %rdx
sete %cl
negq %rcx
incq %r12
vmovupd (%rsi), %ymm4
vmovupd 32(%rsi), %ymm5
vmovupd 72(%rsi), %ymm6
vmovupd 104(%rsi), %ymm7
movq 64(%rsi), %r10
movq 136(%rsi), %r11
addq $0x90, %rsi
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpand %ymm8, %ymm6, %ymm6
vpand %ymm8, %ymm7, %ymm7
andq %rcx, %r10
andq %rcx, %r11
vpor %ymm4, %ymm0, %ymm0
vpor %ymm5, %ymm1, %ymm1
vpor %ymm6, %ymm2, %ymm2
vpor %ymm7, %ymm3, %ymm3
orq %r10, %r8
orq %r11, %r9
decq %rax
jnz L_521_get_entry_65_avx2_9_start
vmovupd %ymm0, (%rdi)
vmovupd %ymm1, 32(%rdi)
vmovupd %ymm2, 144(%rdi)
vmovupd %ymm3, 176(%rdi)
movq %r8, 64(%rdi)
movq %r9, 208(%rdi)
popq %r12
repz retq
#ifndef __APPLE__
.size sp_521_get_entry_65_avx2_9,.-sp_521_get_entry_65_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* !WC_NO_CACHE_RESISTANT */
/* Add 1 to a. (a = a + 1)
*
* a A single precision integer.
*/
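/* Hedged C equivalent (sp_digit assumed 64-bit):
 *
 *   sp_digit c = 1;
 *   for (i = 0; i < 9; i++) {
 *       a[i] += c;
 *       c = (a[i] < c);    // carry only when the digit wrapped
 *   }
 *
 * The adcq chain below is the branch-free form of this carry ripple.
 */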
#ifndef __APPLE__
.text
.globl sp_521_add_one_9
.type sp_521_add_one_9,@function
.align 16
sp_521_add_one_9:
#else
.section __TEXT,__text
.globl _sp_521_add_one_9
.p2align 4
_sp_521_add_one_9:
#endif /* __APPLE__ */
addq $0x01, (%rdi)
adcq $0x00, 8(%rdi)
adcq $0x00, 16(%rdi)
adcq $0x00, 24(%rdi)
adcq $0x00, 32(%rdi)
adcq $0x00, 40(%rdi)
adcq $0x00, 48(%rdi)
adcq $0x00, 56(%rdi)
adcq $0x00, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_add_one_9,.-sp_521_add_one_9
#endif /* __APPLE__ */
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
* size Maximum number of bytes to convert
* a Byte array.
* n Number of bytes in array to read.
*/
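/* Hedged C sketch of the conversion (byteswap64/load64 are illustrative
 * helpers, sp_digit an assumed 64-bit type):
 *
 *   i = 0;
 *   while (n >= 8) {                      // full digits, from the end
 *       n -= 8;
 *       r[i++] = byteswap64(load64(a + n));
 *   }
 *   if (n > 0) {                          // leading high-order bytes
 *       sp_digit d = 0;
 *       for (j = 0; j < n; j++)
 *           d = (d << 8) | a[j];
 *       r[i++] = d;
 *   }
 *   while (i < 9)                         // zero the remaining digits
 *       r[i++] = 0;
 */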
#ifndef __APPLE__
.text
.globl sp_521_from_bin_bswap
.type sp_521_from_bin_bswap,@function
.align 16
sp_521_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_521_from_bin_bswap
.p2align 4
_sp_521_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x42, %r10
xorq %r11, %r11
jmp L_521_from_bin_bswap_64_end
L_521_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_521_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_521_from_bin_bswap_64_start
jmp L_521_from_bin_bswap_8_end
L_521_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_521_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_521_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_521_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_521_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_521_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_521_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_521_from_bin_bswap_zero_end
L_521_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_521_from_bin_bswap_zero_start
L_521_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_521_from_bin_bswap,.-sp_521_from_bin_bswap
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
* Uses the movbe instruction which is an optional instruction.
*
* r A single precision integer.
* size Maximum number of bytes to convert
* a Byte array.
* n Number of bytes in array to read.
*/
#ifndef __APPLE__
.text
.globl sp_521_from_bin_movbe
.type sp_521_from_bin_movbe,@function
.align 16
sp_521_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_521_from_bin_movbe
.p2align 4
_sp_521_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x42, %r10
jmp L_521_from_bin_movbe_64_end
L_521_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_521_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_521_from_bin_movbe_64_start
jmp L_521_from_bin_movbe_8_end
L_521_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_521_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_521_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_521_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_521_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_521_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_521_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_521_from_bin_movbe_zero_end
L_521_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_521_from_bin_movbe_zero_start
L_521_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_521_from_bin_movbe,.-sp_521_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Write r as big endian to byte array.
 * Fixed length number of bytes written: 66
* Uses the bswap instruction.
*
* r A single precision integer.
* a Byte array.
*/
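/* Hedged C sketch (store64/byteswap64 are illustrative helpers): the
 * 521-bit value is written as 66 big-endian bytes, two single bytes from
 * the top digit followed by eight byte-swapped 64-bit stores:
 *
 *   a[0] = (unsigned char)(r[8] >> 8);   // bit 520 and padding
 *   a[1] = (unsigned char)(r[8] >> 0);
 *   for (i = 0; i < 8; i++)               // digits 7..0, big endian
 *       store64(a + 2 + 8 * i, byteswap64(r[7 - i]));
 */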
#ifndef __APPLE__
.text
.globl sp_521_to_bin_bswap_9
.type sp_521_to_bin_bswap_9,@function
.align 16
sp_521_to_bin_bswap_9:
#else
.section __TEXT,__text
.globl _sp_521_to_bin_bswap_9
.p2align 4
_sp_521_to_bin_bswap_9:
#endif /* __APPLE__ */
movb 64(%rdi), %al
movb 65(%rdi), %dl
movb %dl, (%rsi)
movb %al, 1(%rsi)
movq 56(%rdi), %rdx
movq 48(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 2(%rsi)
movq %rax, 10(%rsi)
movq 40(%rdi), %rdx
movq 32(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 18(%rsi)
movq %rax, 26(%rsi)
movq 24(%rdi), %rdx
movq 16(%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 34(%rsi)
movq %rax, 42(%rsi)
movq 8(%rdi), %rdx
movq (%rdi), %rax
bswapq %rdx
bswapq %rax
movq %rdx, 50(%rsi)
movq %rax, 58(%rsi)
repz retq
#ifndef __APPLE__
.size sp_521_to_bin_bswap_9,.-sp_521_to_bin_bswap_9
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Write r as big endian to byte array.
 * Fixed length number of bytes written: 66
* Uses the movbe instruction which is optional.
*
* r A single precision integer.
* a Byte array.
*/
#ifndef __APPLE__
.text
.globl sp_521_to_bin_movbe_9
.type sp_521_to_bin_movbe_9,@function
.align 16
sp_521_to_bin_movbe_9:
#else
.section __TEXT,__text
.globl _sp_521_to_bin_movbe_9
.p2align 4
_sp_521_to_bin_movbe_9:
#endif /* __APPLE__ */
movb 64(%rdi), %al
movb 65(%rdi), %dl
movb %dl, (%rsi)
movb %al, 1(%rsi)
movbeq 56(%rdi), %rdx
movbeq 48(%rdi), %rax
movq %rdx, 2(%rsi)
movq %rax, 10(%rsi)
movbeq 40(%rdi), %rdx
movbeq 32(%rdi), %rax
movq %rdx, 18(%rsi)
movq %rax, 26(%rsi)
movbeq 24(%rdi), %rdx
movbeq 16(%rdi), %rax
movq %rdx, 34(%rsi)
movq %rax, 42(%rsi)
movbeq 8(%rdi), %rdx
movbeq (%rdi), %rax
movq %rdx, 50(%rsi)
movq %rax, 58(%rsi)
repz retq
#ifndef __APPLE__
.size sp_521_to_bin_movbe_9,.-sp_521_to_bin_movbe_9
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
/* Shift number right by n bits. (r = a >> n)
 *
 * r Result of right shift by n.
 * a Number to shift.
 * n Amount to shift.
*/
#ifndef __APPLE__
.text
.globl sp_521_rshift_9
.type sp_521_rshift_9,@function
.align 16
sp_521_rshift_9:
#else
.section __TEXT,__text
.globl _sp_521_rshift_9
.p2align 4
_sp_521_rshift_9:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
shrdq %cl, %rax, %rdx
shrdq %cl, %r8, %rax
shrdq %cl, %r9, %r8
shrdq %cl, %r10, %r9
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 40(%rsi), %rax
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %rdx
shrdq %cl, %rax, %r10
shrdq %cl, %r8, %rax
shrdq %cl, %r9, %r8
shrdq %cl, %rdx, %r9
movq %r10, 32(%rdi)
movq %rax, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
shrq %cl, %rdx
movq %rdx, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_rshift_9,.-sp_521_rshift_9
#endif /* __APPLE__ */
/* Shift number left by n bits. (r = a << n)
*
* r Result of left shift by n.
* a Number to shift.
 * n Amount to shift.
*/
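/* Hedged C sketch (0 < n < 64 assumed; note r gains a tenth digit):
 *
 *   r[9] = a[8] >> (64 - n);
 *   for (i = 8; i > 0; i--)
 *       r[i] = (a[i] << n) | (a[i - 1] >> (64 - n));
 *   r[0] = a[0] << n;
 *
 * Each shldq below produces one r[i] from a neighbouring digit pair.
 */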
#ifndef __APPLE__
.text
.globl sp_521_lshift_9
.type sp_521_lshift_9,@function
.align 16
sp_521_lshift_9:
#else
.section __TEXT,__text
.globl _sp_521_lshift_9
.p2align 4
_sp_521_lshift_9:
#endif /* __APPLE__ */
movb %dl, %cl
movq $0x00, %r10
movq 32(%rsi), %r11
movq 40(%rsi), %rdx
movq 48(%rsi), %rax
movq 56(%rsi), %r8
movq 64(%rsi), %r9
shldq %cl, %r9, %r10
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 40(%rdi)
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
movq %r10, 72(%rdi)
movq (%rsi), %r9
movq 8(%rsi), %rdx
movq 16(%rsi), %rax
movq 24(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 8(%rdi)
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq %r11, 32(%rdi)
shlq %cl, %r9
movq %r9, (%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_lshift_9,.-sp_521_lshift_9
#endif /* __APPLE__ */
/* Shift number left by n bits. (r = a << n)
*
* r Result of left shift by n.
* a Number to shift.
 * n Amount to shift.
*/
#ifndef __APPLE__
.text
.globl sp_521_lshift_18
.type sp_521_lshift_18,@function
.align 16
sp_521_lshift_18:
#else
.section __TEXT,__text
.globl _sp_521_lshift_18
.p2align 4
_sp_521_lshift_18:
#endif /* __APPLE__ */
movb %dl, %cl
movq $0x00, %r10
movq 104(%rsi), %r11
movq 112(%rsi), %rdx
movq 120(%rsi), %rax
movq 128(%rsi), %r8
movq 136(%rsi), %r9
shldq %cl, %r9, %r10
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 112(%rdi)
movq %rax, 120(%rdi)
movq %r8, 128(%rdi)
movq %r9, 136(%rdi)
movq %r10, 144(%rdi)
movq 72(%rsi), %r9
movq 80(%rsi), %rdx
movq 88(%rsi), %rax
movq 96(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 80(%rdi)
movq %rax, 88(%rdi)
movq %r8, 96(%rdi)
movq %r11, 104(%rdi)
movq 40(%rsi), %r11
movq 48(%rsi), %rdx
movq 56(%rsi), %rax
movq 64(%rsi), %r8
shldq %cl, %r8, %r9
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r11, %rdx
movq %rdx, 48(%rdi)
movq %rax, 56(%rdi)
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq 8(%rsi), %r9
movq 16(%rsi), %rdx
movq 24(%rsi), %rax
movq 32(%rsi), %r8
shldq %cl, %r8, %r11
shldq %cl, %rax, %r8
shldq %cl, %rdx, %rax
shldq %cl, %r9, %rdx
movq %rdx, 16(%rdi)
movq %rax, 24(%rdi)
movq %r8, 32(%rdi)
movq %r11, 40(%rdi)
movq (%rsi), %r8
shldq %cl, %r8, %r9
shlq %cl, %r8
movq %r8, (%rdi)
movq %r9, 8(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_lshift_18,.-sp_521_lshift_18
#endif /* __APPLE__ */
/* Sub b from a into a. (a -= b)
*
* a A single precision integer and result.
* b A single precision integer.
*/
#ifndef __APPLE__
.text
.globl sp_521_sub_in_place_9
.type sp_521_sub_in_place_9,@function
.align 16
sp_521_sub_in_place_9:
#else
.section __TEXT,__text
.globl _sp_521_sub_in_place_9
.p2align 4
_sp_521_sub_in_place_9:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq %rdx, 64(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_521_sub_in_place_9,.-sp_521_sub_in_place_9
#endif /* __APPLE__ */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
#ifndef __APPLE__
.text
.globl sp_521_mul_d_9
.type sp_521_mul_d_9,@function
.align 16
sp_521_mul_d_9:
#else
.section __TEXT,__text
.globl _sp_521_mul_d_9
.p2align 4
_sp_521_mul_d_9:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
movq %r10, 64(%rdi)
movq %r8, 72(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_mul_d_9,.-sp_521_mul_d_9
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
#ifndef __APPLE__
.text
.globl sp_521_mul_d_avx2_9
.type sp_521_mul_d_avx2_9,@function
.align 16
sp_521_mul_d_avx2_9:
#else
.section __TEXT,__text
.globl _sp_521_mul_d_avx2_9
.p2align 4
_sp_521_mul_d_avx2_9:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
adcxq %r11, %r10
movq %r9, 64(%rdi)
movq %r10, 72(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_mul_d_avx2_9,.-sp_521_mul_d_avx2_9
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
* returns the result of the division.
*/
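/* Hedged C equivalent using a 128-bit intermediate (illustrative only;
 * the quotient is assumed to fit in 64 bits, as divq requires):
 *
 *   sp_digit div_521_word(sp_digit d1, sp_digit d0, sp_digit div)
 *   {
 *       unsigned __int128 d = ((unsigned __int128)d1 << 64) | d0;
 *       return (sp_digit)(d / div);
 *   }
 */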
#ifndef __APPLE__
.text
.globl div_521_word_asm_9
.type div_521_word_asm_9,@function
.align 16
div_521_word_asm_9:
#else
.section __TEXT,__text
.globl _div_521_word_asm_9
.p2align 4
_div_521_word_asm_9:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_521_word_asm_9,.-div_521_word_asm_9
#endif /* __APPLE__ */
#endif /* _WIN64 */
/* Shift number right by 1 bit. (r = a >> 1)
*
* r Result of right shift by 1.
* a Number to shift.
*/
#ifndef __APPLE__
.text
.globl sp_521_rshift1_9
.type sp_521_rshift1_9,@function
.align 16
sp_521_rshift1_9:
#else
.section __TEXT,__text
.globl _sp_521_rshift1_9
.p2align 4
_sp_521_rshift1_9:
#endif /* __APPLE__ */
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
movq 32(%rsi), %r10
shrdq $0x01, %rax, %rdx
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r10, %r8
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %rcx, 16(%rdi)
movq %r8, 24(%rdi)
movq 40(%rsi), %rax
movq 48(%rsi), %rcx
movq 56(%rsi), %r8
movq 64(%rsi), %rdx
shrdq $0x01, %rax, %r10
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %rdx, %r8
movq %r10, 32(%rdi)
movq %rax, 40(%rdi)
movq %rcx, 48(%rdi)
movq %r8, 56(%rdi)
shrq $0x01, %rdx
movq %rdx, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_rshift1_9,.-sp_521_rshift1_9
#endif /* __APPLE__ */
/* Divide the number by 2 mod the prime. (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus
*/
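/* Hedged C sketch (the add cannot overflow nine digits for the 521-bit
 * moduli used here):
 *
 *   if (a[0] & 1)
 *       r = (a + m) >> 1;   // make the value even, then halve
 *   else
 *       r = a >> 1;
 */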
#ifndef __APPLE__
.text
.globl sp_521_div2_mod_9
.type sp_521_div2_mod_9,@function
.align 16
sp_521_div2_mod_9:
#else
.section __TEXT,__text
.globl _sp_521_div2_mod_9
.p2align 4
_sp_521_div2_mod_9:
#endif /* __APPLE__ */
movq (%rsi), %rax
andq $0x01, %rax
je L_521_mod_inv_9_div2_mod_no_add
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq (%rdx), %r8
movq 8(%rdx), %r9
addq %r8, %rax
adcq %r9, %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rsi), %rax
movq 24(%rsi), %rcx
movq 16(%rdx), %r8
movq 24(%rdx), %r9
adcq %r8, %rax
adcq %r9, %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %rcx
movq 32(%rdx), %r8
movq 40(%rdx), %r9
adcq %r8, %rax
adcq %r9, %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rsi), %rax
movq 56(%rsi), %rcx
movq 48(%rdx), %r8
movq 56(%rdx), %r9
adcq %r8, %rax
adcq %r9, %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rsi), %rax
movq 64(%rdx), %r8
adcq %r8, %rax
movq %rax, 64(%rdi)
movq %rdi, %rsi
L_521_mod_inv_9_div2_mod_no_add:
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
movq 32(%rsi), %r10
shrdq $0x01, %rcx, %rax
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %r10, %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 40(%rsi), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %rax
shrdq $0x01, %rcx, %r10
shrdq $0x01, %r8, %rcx
shrdq $0x01, %r9, %r8
shrdq $0x01, %rax, %r9
movq %r10, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
shrq $0x01, %rax
movq %rax, 64(%rdi)
repz retq
#ifndef __APPLE__
.size sp_521_div2_mod_9,.-sp_521_div2_mod_9
#endif /* __APPLE__ */
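/* Count the number of bits used by a. (r = number of bits)
 *
 * a A single precision integer.
 * returns the 1-based index of the highest set bit, or 0 when a is zero.
 */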
#ifndef __APPLE__
.text
.globl sp_521_num_bits_9
.type sp_521_num_bits_9,@function
.align 16
sp_521_num_bits_9:
#else
.section __TEXT,__text
.globl _sp_521_num_bits_9
.p2align 4
_sp_521_num_bits_9:
#endif /* __APPLE__ */
xorq %rax, %rax
movq 64(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_512
movq $-1, %rax
bsr %rdx, %rax
addq $0x201, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_512:
movq 56(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_448
movq $-1, %rax
bsr %rdx, %rax
addq $0x1c1, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_448:
movq 48(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_384
movq $-1, %rax
bsr %rdx, %rax
addq $0x181, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_384:
movq 40(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_320
movq $-1, %rax
bsr %rdx, %rax
addq $0x141, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_320:
movq 32(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_256
movq $-1, %rax
bsr %rdx, %rax
addq $0x101, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_256:
movq 24(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_192
movq $-1, %rax
bsr %rdx, %rax
addq $0xc1, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_192:
movq 16(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_128
movq $-1, %rax
bsr %rdx, %rax
addq $0x81, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_128:
movq 8(%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_64
movq $-1, %rax
bsr %rdx, %rax
addq $0x41, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_64:
movq (%rdi), %rdx
cmpq $0x00, %rdx
je L_521_num_bits_9_end_0
movq $-1, %rax
bsr %rdx, %rax
addq $0x01, %rax
jmp L_521_num_bits_9_done
L_521_num_bits_9_end_0:
L_521_num_bits_9_done:
repz retq
#ifndef __APPLE__
.size sp_521_num_bits_9,.-sp_521_num_bits_9
#endif /* __APPLE__ */
#endif /* WOLFSSL_SP_521 */
#ifdef WOLFSSL_SP_1024
/* Multiply a and b into r. (r = a * b)
*
 * r Result of multiplication.
 * a First number to multiply.
 * b Second number to multiply.
*/
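/* Implementation note: this is an unrolled product-scanning (Comba)
 * multiply: for each output word k, all partial products a[i] * b[k - i]
 * are summed into three rotating accumulator registers before the word
 * is stored. The low 16 result words are built on the stack so that r
 * may alias a or b, and are copied out at the end. A hedged C sketch of
 * the column structure, assuming a compiler with unsigned __int128
 * (illustrative only; the _ref name is made up):
 *
 *     static void sp_1024_mul_16_ref(uint64_t* r, const uint64_t* a,
 *                                    const uint64_t* b)
 *     {
 *         uint64_t t[16];             // low half, in case r aliases a/b
 *         unsigned __int128 acc = 0, p;
 *         uint64_t o = 0;             // carries out of bit 128
 *         int i, k;
 *         for (k = 0; k <= 30; k++) {
 *             int lo = (k <= 15) ? 0 : k - 15;
 *             int hi = (k <= 15) ? k : 15;
 *             for (i = lo; i <= hi; i++) {
 *                 p = (unsigned __int128)a[i] * b[k - i];
 *                 acc += p;
 *                 if (acc < p) o++;
 *             }
 *             if (k < 16) t[k] = (uint64_t)acc;
 *             else        r[k] = (uint64_t)acc;
 *             acc = (acc >> 64) | ((unsigned __int128)o << 64);
 *             o = 0;
 *         }
 *         r[31] = (uint64_t)acc;
 *         for (k = 0; k < 16; k++)
 *             r[k] = t[k];
 *     }
 */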
#ifndef __APPLE__
.text
.globl sp_1024_mul_16
.type sp_1024_mul_16,@function
.align 16
sp_1024_mul_16:
#else
.section __TEXT,__text
.globl _sp_1024_mul_16
.p2align 4
_sp_1024_mul_16:
#endif /* __APPLE__ */
movq %rdx, %rcx
subq $0x80, %rsp
# A[0] * B[0]
movq (%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
movq %rax, (%rsp)
movq %rdx, %r9
# A[0] * B[1]
movq 8(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[0]
movq (%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 8(%rsp)
# A[0] * B[2]
movq 16(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[1]
movq 8(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[0]
movq (%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 16(%rsp)
# A[0] * B[3]
movq 24(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[2]
movq 16(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[1]
movq 8(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[0]
movq (%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 24(%rsp)
# A[0] * B[4]
movq 32(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[3]
movq 24(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[2]
movq 16(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[1]
movq 8(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[0]
movq (%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 32(%rsp)
# A[0] * B[5]
movq 40(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[4]
movq 32(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[3]
movq 24(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[2]
movq 16(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[1]
movq 8(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[0]
movq (%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 40(%rsp)
# A[0] * B[6]
movq 48(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[5]
movq 40(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[4]
movq 32(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[3]
movq 24(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[2]
movq 16(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[1]
movq 8(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[0]
movq (%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 48(%rsp)
# A[0] * B[7]
movq 56(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[6]
movq 48(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[5]
movq 40(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[4]
movq 32(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[3]
movq 24(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[2]
movq 16(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[1]
movq 8(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[0]
movq (%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 56(%rsp)
# A[0] * B[8]
movq 64(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[7]
movq 56(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[6]
movq 48(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[5]
movq 40(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[4]
movq 32(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[3]
movq 24(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[2]
movq 16(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[1]
movq 8(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[0]
movq (%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 64(%rsp)
# A[0] * B[9]
movq 72(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[8]
movq 64(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[7]
movq 56(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[6]
movq 48(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[5]
movq 40(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[4]
movq 32(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[3]
movq 24(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[2]
movq 16(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[1]
movq 8(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[0]
movq (%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 72(%rsp)
# A[0] * B[10]
movq 80(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[9]
movq 72(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[8]
movq 64(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[7]
movq 56(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[6]
movq 48(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[5]
movq 40(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[4]
movq 32(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[3]
movq 24(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[2]
movq 16(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[1]
movq 8(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[0]
movq (%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 80(%rsp)
# A[0] * B[11]
movq 88(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[10]
movq 80(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[9]
movq 72(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[8]
movq 64(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[7]
movq 56(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[6]
movq 48(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[5]
movq 40(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[4]
movq 32(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[3]
movq 24(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[2]
movq 16(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[1]
movq 8(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[0]
movq (%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 88(%rsp)
# A[0] * B[12]
movq 96(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[11]
movq 88(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[10]
movq 80(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[9]
movq 72(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[8]
movq 64(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[7]
movq 56(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[6]
movq 48(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[5]
movq 40(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[4]
movq 32(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[3]
movq 24(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[2]
movq 16(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[1]
movq 8(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[0]
movq (%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 96(%rsp)
# A[0] * B[13]
movq 104(%rcx), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[1] * B[12]
movq 96(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[11]
movq 88(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[10]
movq 80(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[9]
movq 72(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[8]
movq 64(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[7]
movq 56(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[6]
movq 48(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[5]
movq 40(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[4]
movq 32(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[3]
movq 24(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[2]
movq 16(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[1]
movq 8(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[0]
movq (%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 104(%rsp)
# A[0] * B[14]
movq 112(%rcx), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * B[13]
movq 104(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[2] * B[12]
movq 96(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[11]
movq 88(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[10]
movq 80(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[9]
movq 72(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[8]
movq 64(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[7]
movq 56(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[6]
movq 48(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[5]
movq 40(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[4]
movq 32(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[3]
movq 24(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[2]
movq 16(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[1]
movq 8(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[0]
movq (%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 112(%rsp)
# A[0] * B[15]
movq 120(%rcx), %rax
mulq (%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[1] * B[14]
movq 112(%rcx), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[2] * B[13]
movq 104(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[3] * B[12]
movq 96(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[11]
movq 88(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[10]
movq 80(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[9]
movq 72(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[8]
movq 64(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[7]
movq 56(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[6]
movq 48(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[5]
movq 40(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[4]
movq 32(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[3]
movq 24(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[2]
movq 16(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[1]
movq 8(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[0]
movq (%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 120(%rsp)
# A[1] * B[15]
movq 120(%rcx), %rax
mulq 8(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B[14]
movq 112(%rcx), %rax
mulq 16(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[3] * B[13]
movq 104(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[4] * B[12]
movq 96(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[11]
movq 88(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[10]
movq 80(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[9]
movq 72(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[8]
movq 64(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[7]
movq 56(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[6]
movq 48(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[5]
movq 40(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[4]
movq 32(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[3]
movq 24(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[2]
movq 16(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[1]
movq 8(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 128(%rdi)
# A[2] * B[15]
movq 120(%rcx), %rax
mulq 16(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B[14]
movq 112(%rcx), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[4] * B[13]
movq 104(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[5] * B[12]
movq 96(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[11]
movq 88(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[10]
movq 80(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[9]
movq 72(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[8]
movq 64(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[7]
movq 56(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[6]
movq 48(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[5]
movq 40(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[4]
movq 32(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[3]
movq 24(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[2]
movq 16(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 136(%rdi)
# A[3] * B[15]
movq 120(%rcx), %rax
mulq 24(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B[14]
movq 112(%rcx), %rax
mulq 32(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[5] * B[13]
movq 104(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[6] * B[12]
movq 96(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[11]
movq 88(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[10]
movq 80(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[9]
movq 72(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[8]
movq 64(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[7]
movq 56(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[6]
movq 48(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[5]
movq 40(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[4]
movq 32(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[3]
movq 24(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 144(%rdi)
# A[4] * B[15]
movq 120(%rcx), %rax
mulq 32(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B[14]
movq 112(%rcx), %rax
mulq 40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[6] * B[13]
movq 104(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[7] * B[12]
movq 96(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[11]
movq 88(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[10]
movq 80(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[9]
movq 72(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[8]
movq 64(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[7]
movq 56(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[6]
movq 48(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[5]
movq 40(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[4]
movq 32(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 152(%rdi)
# A[5] * B[15]
movq 120(%rcx), %rax
mulq 40(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B[14]
movq 112(%rcx), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[7] * B[13]
movq 104(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[8] * B[12]
movq 96(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[11]
movq 88(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[10]
movq 80(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[9]
movq 72(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[8]
movq 64(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[7]
movq 56(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[6]
movq 48(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[5]
movq 40(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 160(%rdi)
# A[6] * B[15]
movq 120(%rcx), %rax
mulq 48(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B[14]
movq 112(%rcx), %rax
mulq 56(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[8] * B[13]
movq 104(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[9] * B[12]
movq 96(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[11]
movq 88(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[10]
movq 80(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[9]
movq 72(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[8]
movq 64(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[7]
movq 56(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[6]
movq 48(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 168(%rdi)
# A[7] * B[15]
movq 120(%rcx), %rax
mulq 56(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B[14]
movq 112(%rcx), %rax
mulq 64(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[9] * B[13]
movq 104(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[10] * B[12]
movq 96(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[11]
movq 88(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[10]
movq 80(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[9]
movq 72(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[8]
movq 64(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[7]
movq 56(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 176(%rdi)
# A[8] * B[15]
movq 120(%rcx), %rax
mulq 64(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B[14]
movq 112(%rcx), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[10] * B[13]
movq 104(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[11] * B[12]
movq 96(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[11]
movq 88(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[10]
movq 80(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[9]
movq 72(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[8]
movq 64(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 184(%rdi)
# A[9] * B[15]
movq 120(%rcx), %rax
mulq 72(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B[14]
movq 112(%rcx), %rax
mulq 80(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[11] * B[13]
movq 104(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[12] * B[12]
movq 96(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[11]
movq 88(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[10]
movq 80(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[9]
movq 72(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 192(%rdi)
# A[10] * B[15]
movq 120(%rcx), %rax
mulq 80(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B[14]
movq 112(%rcx), %rax
mulq 88(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[12] * B[13]
movq 104(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[13] * B[12]
movq 96(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[11]
movq 88(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[10]
movq 80(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 200(%rdi)
# A[11] * B[15]
movq 120(%rcx), %rax
mulq 88(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B[14]
movq 112(%rcx), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * B[13]
movq 104(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[14] * B[12]
movq 96(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[11]
movq 88(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 208(%rdi)
# A[12] * B[15]
movq 120(%rcx), %rax
mulq 96(%rsi)
xorq %r10, %r10
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B[14]
movq 112(%rcx), %rax
mulq 104(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[14] * B[13]
movq 104(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
# A[15] * B[12]
movq 96(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r10
movq %r8, 216(%rdi)
# A[13] * B[15]
movq 120(%rcx), %rax
mulq 104(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B[14]
movq 112(%rcx), %rax
mulq 112(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
# A[15] * B[13]
movq 104(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq $0x00, %r8
movq %r9, 224(%rdi)
# A[14] * B[15]
movq 120(%rcx), %rax
mulq 112(%rsi)
xorq %r9, %r9
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B[14]
movq 112(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r10
adcq %rdx, %r8
adcq $0x00, %r9
movq %r10, 232(%rdi)
# A[15] * B[15]
movq 120(%rcx), %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 240(%rdi)
movq %r9, 248(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r8
movq 24(%rsp), %r9
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r8
movq 56(%rsp), %r9
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r8
movq 88(%rsp), %r9
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsp), %rax
movq 104(%rsp), %rdx
movq 112(%rsp), %r8
movq 120(%rsp), %r9
movq %rax, 96(%rdi)
movq %rdx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mul_16,.-sp_1024_mul_16
#endif /* __APPLE__ */
/* Square a and put result in r. (r = a * a)
*
 * r Result of squaring.
 * a Number to square.
*/
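/* Implementation note: squaring does roughly half the multiply work.
 * For each output word k, the cross products a[i] * a[k - i] with
 * i < k - i appear twice in the square, so they are summed once and then
 * doubled; when k is even the diagonal term a[k/2]^2 is added once. A
 * hedged C sketch, assuming unsigned __int128 (illustrative only; the
 * _ref name is made up):
 *
 *     static void sp_1024_sqr_16_ref(uint64_t* r, const uint64_t* a)
 *     {
 *         uint64_t t[16];             // low half, in case r aliases a
 *         unsigned __int128 acc = 0, p;
 *         uint64_t o = 0;             // carries out of bit 128
 *         int i, k;
 *         for (k = 0; k <= 30; k++) {
 *             i = (k <= 15) ? 0 : k - 15;
 *             for (; i < k - i; i++) {   // cross terms: counted twice
 *                 p = (unsigned __int128)a[i] * a[k - i];
 *                 acc += p; if (acc < p) o++;
 *                 acc += p; if (acc < p) o++;
 *             }
 *             if (i == k - i) {          // diagonal term: counted once
 *                 p = (unsigned __int128)a[i] * a[i];
 *                 acc += p; if (acc < p) o++;
 *             }
 *             if (k < 16) t[k] = (uint64_t)acc;
 *             else        r[k] = (uint64_t)acc;
 *             acc = (acc >> 64) | ((unsigned __int128)o << 64);
 *             o = 0;
 *         }
 *         r[31] = (uint64_t)acc;
 *         for (k = 0; k < 16; k++)
 *             r[k] = t[k];
 *     }
 */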
#ifndef __APPLE__
.text
.globl sp_1024_sqr_16
.type sp_1024_sqr_16,@function
.align 16
sp_1024_sqr_16:
#else
.section __TEXT,__text
.globl _sp_1024_sqr_16
.p2align 4
_sp_1024_sqr_16:
#endif /* __APPLE__ */
pushq %r12
subq $0x80, %rsp
# A[0] * A[0]
movq (%rsi), %rax
mulq %rax
xorq %r9, %r9
movq %rax, (%rsp)
movq %rdx, %r8
# A[0] * A[1]
movq 8(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 8(%rsp)
# A[0] * A[2]
movq 16(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[1] * A[1]
movq 8(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 16(%rsp)
# A[0] * A[3]
movq 24(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[1] * A[2]
movq 16(%rsi), %rax
mulq 8(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 24(%rsp)
# A[0] * A[4]
movq 32(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[1] * A[3]
movq 24(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[2] * A[2]
movq 16(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 32(%rsp)
# A[0] * A[5]
movq 40(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[4]
movq 32(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[3]
movq 24(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 40(%rsp)
# A[0] * A[6]
movq 48(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[5]
movq 40(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[4]
movq 32(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[3]
movq 24(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 48(%rsp)
# A[0] * A[7]
movq 56(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[6]
movq 48(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[5]
movq 40(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[4]
movq 32(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 56(%rsp)
# A[0] * A[8]
movq 64(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[7]
movq 56(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[6]
movq 48(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[5]
movq 40(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[4]
movq 32(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 64(%rsp)
# A[0] * A[9]
movq 72(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[8]
movq 64(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[7]
movq 56(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[6]
movq 48(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[5]
movq 40(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 72(%rsp)
# A[0] * A[10]
movq 80(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[9]
movq 72(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[8]
movq 64(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[7]
movq 56(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[6]
movq 48(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[5]
movq 40(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 80(%rsp)
# A[0] * A[11]
movq 88(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[10]
movq 80(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[9]
movq 72(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[8]
movq 64(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[7]
movq 56(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[6]
movq 48(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 88(%rsp)
# A[0] * A[12]
movq 96(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[11]
movq 88(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[10]
movq 80(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[9]
movq 72(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[8]
movq 64(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[7]
movq 56(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[6]
movq 48(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 96(%rsp)
# A[0] * A[13]
movq 104(%rsi), %rax
mulq (%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[12]
movq 96(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[11]
movq 88(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[10]
movq 80(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[9]
movq 72(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[8]
movq 64(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[7]
movq 56(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 104(%rsp)
# A[0] * A[14]
movq 112(%rsi), %rax
mulq (%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[13]
movq 104(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[12]
movq 96(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[11]
movq 88(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[10]
movq 80(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[9]
movq 72(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[8]
movq 64(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[7]
movq 56(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 112(%rsp)
# A[0] * A[15]
movq 120(%rsi), %rax
mulq (%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[1] * A[14]
movq 112(%rsi), %rax
mulq 8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[2] * A[13]
movq 104(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[12]
movq 96(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[11]
movq 88(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[10]
movq 80(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[9]
movq 72(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[8]
movq 64(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 120(%rsp)
# A[1] * A[15]
movq 120(%rsi), %rax
mulq 8(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[2] * A[14]
movq 112(%rsi), %rax
mulq 16(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[3] * A[13]
movq 104(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[12]
movq 96(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[11]
movq 88(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[10]
movq 80(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[9]
movq 72(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[8]
movq 64(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 128(%rdi)
# A[2] * A[15]
movq 120(%rsi), %rax
mulq 16(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[3] * A[14]
movq 112(%rsi), %rax
mulq 24(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[4] * A[13]
movq 104(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[12]
movq 96(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[11]
movq 88(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[10]
movq 80(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[9]
movq 72(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 136(%rdi)
# A[3] * A[15]
movq 120(%rsi), %rax
mulq 24(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[4] * A[14]
movq 112(%rsi), %rax
mulq 32(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[5] * A[13]
movq 104(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[12]
movq 96(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[11]
movq 88(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[10]
movq 80(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[9]
movq 72(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 144(%rdi)
# A[4] * A[15]
movq 120(%rsi), %rax
mulq 32(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[5] * A[14]
movq 112(%rsi), %rax
mulq 40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[6] * A[13]
movq 104(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[12]
movq 96(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[11]
movq 88(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[10]
movq 80(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 152(%rdi)
# A[5] * A[15]
movq 120(%rsi), %rax
mulq 40(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[6] * A[14]
movq 112(%rsi), %rax
mulq 48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[7] * A[13]
movq 104(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[12]
movq 96(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[11]
movq 88(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[10]
movq 80(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 160(%rdi)
# A[6] * A[15]
movq 120(%rsi), %rax
mulq 48(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[7] * A[14]
movq 112(%rsi), %rax
mulq 56(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[8] * A[13]
movq 104(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[12]
movq 96(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[11]
movq 88(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 168(%rdi)
# A[7] * A[15]
movq 120(%rsi), %rax
mulq 56(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[8] * A[14]
movq 112(%rsi), %rax
mulq 64(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[9] * A[13]
movq 104(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[12]
movq 96(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[11]
movq 88(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 176(%rdi)
# A[8] * A[15]
movq 120(%rsi), %rax
mulq 64(%rsi)
xorq %r8, %r8
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[9] * A[14]
movq 112(%rsi), %rax
mulq 72(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[10] * A[13]
movq 104(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[12]
movq 96(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r9
adcq %r11, %rcx
adcq %r12, %r8
movq %r9, 184(%rdi)
# A[9] * A[15]
movq 120(%rsi), %rax
mulq 72(%rsi)
xorq %r9, %r9
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[10] * A[14]
movq 112(%rsi), %rax
mulq 80(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[11] * A[13]
movq 104(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[12] * A[12]
movq 96(%rsi), %rax
mulq %rax
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %rcx
adcq %r11, %r8
adcq %r12, %r9
movq %rcx, 192(%rdi)
# A[10] * A[15]
movq 120(%rsi), %rax
mulq 80(%rsi)
xorq %rcx, %rcx
xorq %r12, %r12
movq %rax, %r10
movq %rdx, %r11
# A[11] * A[14]
movq 112(%rsi), %rax
mulq 88(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
# A[12] * A[13]
movq 104(%rsi), %rax
mulq 96(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
addq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
addq %r10, %r8
adcq %r11, %r9
adcq %r12, %rcx
movq %r8, 200(%rdi)
# A[11] * A[15]
movq 120(%rsi), %rax
mulq 88(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[12] * A[14]
movq 112(%rsi), %rax
mulq 96(%rsi)
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
# A[13] * A[13]
movq 104(%rsi), %rax
mulq %rax
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 208(%rdi)
# A[12] * A[15]
movq 120(%rsi), %rax
mulq 96(%rsi)
xorq %r9, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
# A[13] * A[14]
movq 112(%rsi), %rax
mulq 104(%rsi)
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq $0x00, %r9
movq %rcx, 216(%rdi)
# A[13] * A[15]
movq 120(%rsi), %rax
mulq 104(%rsi)
xorq %rcx, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
# A[14] * A[14]
movq 112(%rsi), %rax
mulq %rax
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %rcx
movq %r8, 224(%rdi)
# A[14] * A[15]
movq 120(%rsi), %rax
mulq 112(%rsi)
xorq %r8, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
addq %rax, %r9
adcq %rdx, %rcx
adcq $0x00, %r8
movq %r9, 232(%rdi)
# A[15] * A[15]
movq 120(%rsi), %rax
mulq %rax
addq %rax, %rcx
adcq %rdx, %r8
movq %rcx, 240(%rdi)
movq %r8, 248(%rdi)
movq (%rsp), %rax
movq 8(%rsp), %rdx
movq 16(%rsp), %r10
movq 24(%rsp), %r11
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq 32(%rsp), %rax
movq 40(%rsp), %rdx
movq 48(%rsp), %r10
movq 56(%rsp), %r11
movq %rax, 32(%rdi)
movq %rdx, 40(%rdi)
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsp), %rax
movq 72(%rsp), %rdx
movq 80(%rsp), %r10
movq 88(%rsp), %r11
movq %rax, 64(%rdi)
movq %rdx, 72(%rdi)
movq %r10, 80(%rdi)
movq %r11, 88(%rdi)
movq 96(%rsp), %rax
movq 104(%rsp), %rdx
movq 112(%rsp), %r10
movq 120(%rsp), %r11
movq %rax, 96(%rdi)
movq %rdx, 104(%rdi)
movq %r10, 112(%rdi)
movq %r11, 120(%rdi)
addq $0x80, %rsp
popq %r12
repz retq
#ifndef __APPLE__
.size sp_1024_sqr_16,.-sp_1024_sqr_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Multiply a and b into r. (r = a * b)
*
* r Result of multiplication.
* a First number to multiply.
* b Second number to multiply.
*/
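/* Implementation note: this variant relies on BMI2/ADX. mulx computes a
 * 64x64 -> 128-bit product without touching the flags, so two carry
 * chains can run interleaved: adcx propagates carries through CF while
 * adox propagates them through OF. Each row adds a[i] * b[] into the
 * partial result, low product halves on one chain and high halves on the
 * other; a stack buffer is selected (cmovne/cmove below) when r aliases
 * a or b. A hedged C sketch of one such row using the x86 intrinsics,
 * assuming r has n + 2 limbs of headroom; a compiler targeting ADX may
 * map the two _addcarry_u64 chains onto adcx/adox (illustrative only;
 * row_mul_add is a made-up name):
 *
 *     #include <immintrin.h>
 *
 *     static void row_mul_add(unsigned long long* r, unsigned long long ai,
 *                             const unsigned long long* b, int n)
 *     {
 *         unsigned char c1 = 0, c2 = 0;
 *         unsigned long long lo, hi;
 *         int j;
 *         for (j = 0; j < n; j++) {
 *             lo = _mulx_u64(ai, b[j], &hi);
 *             c1 = _addcarry_u64(c1, r[j], lo, &r[j]);         // CF chain
 *             c2 = _addcarry_u64(c2, r[j + 1], hi, &r[j + 1]); // OF chain
 *         }
 *         // fold the two pending carries into the top limbs
 *         c1 = _addcarry_u64(c1, r[n], 0, &r[n]);
 *         (void)_addcarry_u64(c2, r[n + 1], c1, &r[n + 1]);
 *     }
 */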
#ifndef __APPLE__
.text
.globl sp_1024_mul_avx2_16
.type sp_1024_mul_avx2_16,@function
.align 16
sp_1024_mul_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mul_avx2_16
.p2align 4
_sp_1024_mul_avx2_16:
#endif /* __APPLE__ */
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
movq %rdx, %rbp
subq $0x80, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbx
cmovne %rdi, %rbx
cmpq %rdi, %rbp
cmove %rsp, %rbx
addq $0x80, %rdi
xorq %r14, %r14
movq (%rsi), %rdx
# A[0] * B[0]
mulx (%rbp), %r8, %r9
# A[0] * B[1]
mulx 8(%rbp), %rax, %r10
movq %r8, (%rbx)
adcxq %rax, %r9
# A[0] * B[2]
mulx 16(%rbp), %rax, %r11
movq %r9, 8(%rbx)
adcxq %rax, %r10
# A[0] * B[3]
mulx 24(%rbp), %rax, %r12
movq %r10, 16(%rbx)
adcxq %rax, %r11
movq %r11, 24(%rbx)
# A[0] * B[4]
mulx 32(%rbp), %rax, %r8
adcxq %rax, %r12
# A[0] * B[5]
mulx 40(%rbp), %rax, %r9
movq %r12, 32(%rbx)
adcxq %rax, %r8
# A[0] * B[6]
mulx 48(%rbp), %rax, %r10
movq %r8, 40(%rbx)
adcxq %rax, %r9
# A[0] * B[7]
mulx 56(%rbp), %rax, %r11
movq %r9, 48(%rbx)
adcxq %rax, %r10
movq %r10, 56(%rbx)
# A[0] * B[8]
mulx 64(%rbp), %rax, %r12
adcxq %rax, %r11
# A[0] * B[9]
mulx 72(%rbp), %rax, %r8
movq %r11, 64(%rbx)
adcxq %rax, %r12
# A[0] * B[10]
mulx 80(%rbp), %rax, %r9
movq %r12, 72(%rbx)
adcxq %rax, %r8
# A[0] * B[11]
mulx 88(%rbp), %rax, %r10
movq %r8, 80(%rbx)
adcxq %rax, %r9
movq %r9, 88(%rbx)
# A[0] * B[12]
mulx 96(%rbp), %rax, %r11
adcxq %rax, %r10
# A[0] * B[13]
mulx 104(%rbp), %rax, %r12
movq %r10, 96(%rbx)
adcxq %rax, %r11
# A[0] * B[14]
mulx 112(%rbp), %rax, %r8
movq %r11, 104(%rbx)
adcxq %rax, %r12
# A[0] * B[15]
mulx 120(%rbp), %rax, %r9
movq %r12, 112(%rbx)
adcxq %rax, %r8
adcxq %r14, %r9
movq %r14, %r13
adcxq %r14, %r13
movq %r8, 120(%rbx)
movq %r9, (%rdi)
movq 8(%rsi), %rdx
movq 8(%rbx), %r9
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
# A[1] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 8(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[1] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 32(%rbx)
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
# A[1] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[1] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 64(%rbx)
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[1] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[1] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[1] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[1] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[1] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[1] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[1] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq 16(%rsi), %rdx
movq 16(%rbx), %r10
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
# A[2] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 16(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[2] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[2] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 40(%rbx)
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
# A[2] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[2] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 72(%rbx)
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[2] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[2] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[2] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[2] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[2] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[2] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, (%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq 24(%rsi), %rdx
movq 24(%rbx), %r11
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
# A[3] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 24(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[3] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 48(%rbx)
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
# A[3] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[3] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 80(%rbx)
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[3] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[3] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[3] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[3] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[3] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 16(%rdi)
movq %r12, 24(%rdi)
movq 32(%rsi), %rdx
movq 32(%rbx), %r12
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
# A[4] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 32(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 56(%rbx)
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
# A[4] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 88(%rbx)
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[4] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[4] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[4] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[4] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[4] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[4] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 24(%rdi)
movq %r8, 32(%rdi)
movq 40(%rsi), %rdx
movq 40(%rbx), %r8
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
# A[5] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 40(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 64(%rbx)
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[5] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[5] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[5] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[5] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[5] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[5] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[5] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[5] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rsi), %rdx
movq 48(%rbx), %r9
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
# A[6] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 48(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[6] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 72(%rbx)
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[6] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[6] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[6] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[6] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[6] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[6] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[6] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[6] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, 40(%rdi)
movq %r10, 48(%rdi)
movq 56(%rsi), %rdx
movq 56(%rbx), %r10
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
# A[7] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 56(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[7] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[7] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 80(%rbx)
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[7] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[7] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[7] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[7] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[7] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[7] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[7] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 48(%rdi)
movq %r11, 56(%rdi)
movq 64(%rsi), %rdx
movq 64(%rbx), %r11
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
# A[8] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 64(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[8] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 88(%rbx)
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[8] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[8] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[8] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[8] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 24(%rdi)
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[8] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[8] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[8] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 56(%rdi)
movq %r12, 64(%rdi)
movq 72(%rsi), %rdx
movq 72(%rbx), %r12
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
# A[9] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 72(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 96(%rbx)
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[9] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[9] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[9] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[9] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
# A[9] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[9] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[9] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[9] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rsi), %rdx
movq 80(%rbx), %r8
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
# A[10] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 80(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[10] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 104(%rbx)
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[10] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[10] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[10] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[10] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rdi)
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
# A[10] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[10] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[10] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[10] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq 88(%rsi), %rdx
movq 88(%rbx), %r9
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
# A[11] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r9, 88(%rbx)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[11] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[11] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 112(%rbx)
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[11] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[11] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[11] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[11] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[11] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
# A[11] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[11] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[11] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
movq %r14, %r10
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %r13, %r10
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rdx
movq 96(%rbx), %r10
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
# A[12] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r10, 96(%rbx)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[12] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[12] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 120(%rbx)
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
# A[12] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[12] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 24(%rdi)
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
# A[12] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[12] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 56(%rdi)
movq 72(%rdi), %r8
movq 80(%rdi), %r9
movq 88(%rdi), %r10
# A[12] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[12] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
movq %r14, %r11
adcxq %rax, %r10
adoxq %rcx, %r11
adcxq %r13, %r11
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r10, 88(%rdi)
movq %r11, 96(%rdi)
movq 104(%rsi), %rdx
movq 104(%rbx), %r11
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[13] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r11, 104(%rbx)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[13] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, (%rdi)
movq 16(%rdi), %r11
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[13] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[13] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
# A[13] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[13] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
movq %r12, 64(%rdi)
movq 80(%rdi), %r9
movq 88(%rdi), %r10
movq 96(%rdi), %r11
# A[13] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[13] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
movq %r14, %r12
adcxq %rax, %r11
adoxq %rcx, %r12
adcxq %r13, %r12
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r11, 96(%rdi)
movq %r12, 104(%rdi)
movq 112(%rsi), %rdx
movq 112(%rbx), %r12
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
# A[14] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r12, 112(%rbx)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
movq 24(%rdi), %r12
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
# A[14] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r11, 16(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 40(%rdi)
movq 56(%rdi), %r11
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
# A[14] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[14] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r10, 48(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[14] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r8, 72(%rdi)
movq 88(%rdi), %r10
movq 96(%rdi), %r11
movq 104(%rdi), %r12
# A[14] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r9, 80(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[14] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[14] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r11, 96(%rdi)
movq %r14, %r8
adcxq %rax, %r12
adoxq %rcx, %r8
adcxq %r13, %r8
movq %r14, %r13
adoxq %r14, %r13
adcxq %r14, %r13
movq %r12, 104(%rdi)
movq %r8, 112(%rdi)
movq 120(%rsi), %rdx
movq 120(%rbx), %r8
movq (%rdi), %r9
movq 8(%rdi), %r10
movq 16(%rdi), %r11
movq 24(%rdi), %r12
# A[15] * B[0]
mulx (%rbp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[1]
mulx 8(%rbp), %rax, %rcx
movq %r8, 120(%rbx)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] * B[2]
mulx 16(%rbp), %rax, %rcx
movq %r9, (%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
# A[15] * B[3]
mulx 24(%rbp), %rax, %rcx
movq %r10, 8(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
movq %r11, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
movq 48(%rdi), %r10
movq 56(%rdi), %r11
# A[15] * B[4]
mulx 32(%rbp), %rax, %rcx
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[5]
mulx 40(%rbp), %rax, %rcx
movq %r12, 24(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[6]
mulx 48(%rbp), %rax, %rcx
movq %r8, 32(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] * B[7]
mulx 56(%rbp), %rax, %rcx
movq %r9, 40(%rdi)
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
movq 64(%rdi), %r12
movq 72(%rdi), %r8
movq 80(%rdi), %r9
movq 88(%rdi), %r10
# A[15] * B[8]
mulx 64(%rbp), %rax, %rcx
adcxq %rax, %r11
adoxq %rcx, %r12
# A[15] * B[9]
mulx 72(%rbp), %rax, %rcx
movq %r11, 56(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[10]
mulx 80(%rbp), %rax, %rcx
movq %r12, 64(%rdi)
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] * B[11]
mulx 88(%rbp), %rax, %rcx
movq %r8, 72(%rdi)
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r9, 80(%rdi)
movq 96(%rdi), %r11
movq 104(%rdi), %r12
movq 112(%rdi), %r8
# A[15] * B[12]
mulx 96(%rbp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
# A[15] * B[13]
mulx 104(%rbp), %rax, %rcx
movq %r10, 88(%rdi)
adcxq %rax, %r11
adoxq %rcx, %r12
# A[15] * B[14]
mulx 112(%rbp), %rax, %rcx
movq %r11, 96(%rdi)
adcxq %rax, %r12
adoxq %rcx, %r8
# A[15] * B[15]
mulx 120(%rbp), %rax, %rcx
movq %r12, 104(%rdi)
movq %r14, %r9
adcxq %rax, %r8
adoxq %rcx, %r9
adcxq %r13, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
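# If r aliases a or b, the low 128 bytes of the product were accumulated
# in the temporary buffer (%rbx); copy them out to r before returning.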
subq $0x80, %rdi
cmpq %rdi, %rsi
je L_start_1024_mul_avx2_16
cmpq %rdi, %rbp
jne L_end_1024_mul_avx2_16
L_start_1024_mul_avx2_16:
vmovdqu (%rbx), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbx), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbx), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbx), %xmm0
vmovups %xmm0, 48(%rdi)
vmovdqu 64(%rbx), %xmm0
vmovups %xmm0, 64(%rdi)
vmovdqu 80(%rbx), %xmm0
vmovups %xmm0, 80(%rdi)
vmovdqu 96(%rbx), %xmm0
vmovups %xmm0, 96(%rdi)
vmovdqu 112(%rbx), %xmm0
vmovups %xmm0, 112(%rdi)
L_end_1024_mul_avx2_16:
addq $0x80, %rsp
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size sp_1024_mul_avx2_16,.-sp_1024_mul_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Square a and put result in r. (r = a * a)
*
* r A single precision integer.
* a A single precision integer.
*/
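/* The diagonals below accumulate each off-diagonal product A[i] x A[j]
 * (j < i) exactly once; the "Double and Add" pass at the end then applies
 *
 *   a^2 = 2 * sum_{j<i} A[i]*A[j]*2^(64*(i+j)) + sum_i A[i]^2 * 2^(128*i)
 *
 * by doubling the accumulated sum and folding in the diagonal squares.
 */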
#ifndef __APPLE__
.text
.globl sp_1024_sqr_avx2_16
.type sp_1024_sqr_avx2_16,@function
.align 16
sp_1024_sqr_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_sqr_avx2_16
.p2align 4
_sp_1024_sqr_avx2_16:
#endif /* __APPLE__ */
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
subq $0x80, %rsp
cmpq %rdi, %rsi
movq %rsp, %rbp
cmovne %rdi, %rbp
addq $0x80, %rdi
xorq %r11, %r11
# Diagonal 1
# Zero into %r9
# Zero into %r10
# A[1] x A[0]
movq (%rsi), %rdx
mulxq 8(%rsi), %r8, %r9
# A[2] x A[0]
mulxq 16(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 8(%rbp)
movq %r9, 16(%rbp)
# Zero into %r8
# Zero into %r9
# A[3] x A[0]
mulxq 24(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
# A[4] x A[0]
mulxq 32(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
movq %r10, 24(%rbp)
movq %r8, 32(%rbp)
# Zero into %r10
# Zero into %r8
# A[5] x A[0]
mulxq 40(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
# A[6] x A[0]
mulxq 48(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r9, 40(%rbp)
movq %r10, 48(%rbp)
# Zero into %r9
# Zero into %r10
# A[7] x A[0]
mulxq 56(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
# A[8] x A[0]
mulxq 64(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 56(%rbp)
movq %r9, 64(%rbp)
# Zero into %r8
# Zero into %r9
# A[9] x A[0]
mulxq 72(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
# A[10] x A[0]
mulxq 80(%rsi), %rax, %r9
adcxq %rax, %r8
adoxq %r11, %r9
movq %r10, 72(%rbp)
movq %r8, 80(%rbp)
# No load %r13 - %r10
# A[11] x A[0]
mulxq 88(%rsi), %rax, %r13
adcxq %rax, %r9
adoxq %r11, %r13
# A[12] x A[0]
mulxq 96(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %r11, %r14
movq %r9, 88(%rbp)
# No store %r13 - %r10
# No load %r15 - %r9
# A[13] x A[0]
mulxq 104(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %r11, %r15
# A[14] x A[0]
mulxq 112(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %r11, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
# Zero into %r8
# Zero into %r9
# A[15] x A[0]
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %rbx
adoxq %r11, %r8
# No store %rbx - %r10
# Carry
adcxq %r11, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, (%rdi)
# Diagonal 2
movq 24(%rbp), %r8
movq 32(%rbp), %r9
movq 40(%rbp), %r10
# A[2] x A[1]
movq 8(%rsi), %rdx
mulxq 16(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[3] x A[1]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 24(%rbp)
movq %r9, 32(%rbp)
movq 48(%rbp), %r8
movq 56(%rbp), %r9
# A[4] x A[1]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[5] x A[1]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 40(%rbp)
movq %r8, 48(%rbp)
movq 64(%rbp), %r10
movq 72(%rbp), %r8
# A[6] x A[1]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[7] x A[1]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 56(%rbp)
movq %r10, 64(%rbp)
movq 80(%rbp), %r9
movq 88(%rbp), %r10
# A[8] x A[1]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[9] x A[1]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 72(%rbp)
movq %r9, 80(%rbp)
# No load %r13 - %r8
# A[10] x A[1]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r13
# A[11] x A[1]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r10, 88(%rbp)
# No store %r13 - %r8
# No load %r15 - %r10
# A[12] x A[1]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[13] x A[1]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r9
# No store %r15 - %r10
movq (%rdi), %r9
# Zero into %r10
# A[14] x A[1]
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[15] x A[1]
mulxq 120(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
# Zero into %r8
# Zero into %r9
# A[15] x A[2]
movq 16(%rsi), %rdx
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 8(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 16(%rdi)
# Diagonal 3
movq 40(%rbp), %r8
movq 48(%rbp), %r9
movq 56(%rbp), %r10
# A[3] x A[2]
mulxq 24(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[4] x A[2]
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 40(%rbp)
movq %r9, 48(%rbp)
movq 64(%rbp), %r8
movq 72(%rbp), %r9
# A[5] x A[2]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[6] x A[2]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 56(%rbp)
movq %r8, 64(%rbp)
movq 80(%rbp), %r10
movq 88(%rbp), %r8
# A[7] x A[2]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[8] x A[2]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 72(%rbp)
movq %r10, 80(%rbp)
# No load %r13 - %r9
# A[9] x A[2]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r13
# A[10] x A[2]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r8, 88(%rbp)
# No store %r13 - %r9
# No load %r15 - %r8
# A[11] x A[2]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[12] x A[2]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r10
# No store %r15 - %r8
movq (%rdi), %r10
movq 8(%rdi), %r8
# A[13] x A[2]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r10
# A[14] x A[2]
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# No store %rbx - %r9
movq %r10, (%rdi)
movq 16(%rdi), %r9
# Zero into %r10
# A[14] x A[3]
movq 24(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[4]
movq 32(%rsi), %rdx
mulxq 112(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
# Zero into %r8
# Zero into %r9
# A[14] x A[5]
movq 40(%rsi), %rdx
mulxq 112(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 24(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 32(%rdi)
# Diagonal 4
movq 56(%rbp), %r8
movq 64(%rbp), %r9
movq 72(%rbp), %r10
# A[4] x A[3]
movq 24(%rsi), %rdx
mulxq 32(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[5] x A[3]
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 56(%rbp)
movq %r9, 64(%rbp)
movq 80(%rbp), %r8
movq 88(%rbp), %r9
# A[6] x A[3]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[7] x A[3]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 72(%rbp)
movq %r8, 80(%rbp)
# No load %r13 - %r10
# A[8] x A[3]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r13
# A[9] x A[3]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r9, 88(%rbp)
# No store %r13 - %r10
# No load %r15 - %r9
# A[10] x A[3]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[11] x A[3]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[12] x A[3]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# A[13] x A[3]
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# No store %rbx - %r10
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[13] x A[4]
movq 32(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] x A[5]
movq 40(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
# Zero into %r10
# A[13] x A[6]
movq 48(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] x A[7]
movq 56(%rsi), %rdx
mulxq 104(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
# Zero into %r8
# Zero into %r9
# A[13] x A[8]
movq 64(%rsi), %rdx
mulxq 104(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 40(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 48(%rdi)
# Diagonal 5
movq 72(%rbp), %r8
movq 80(%rbp), %r9
movq 88(%rbp), %r10
# A[5] x A[4]
movq 32(%rsi), %rdx
mulxq 40(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[6] x A[4]
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 72(%rbp)
movq %r9, 80(%rbp)
# No load %r13 - %r8
# A[7] x A[4]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r13
# A[8] x A[4]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r10, 88(%rbp)
# No store %r13 - %r8
# No load %r15 - %r10
# A[9] x A[4]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[10] x A[4]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r9
# No store %r15 - %r10
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[11] x A[4]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[12] x A[4]
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[12] x A[5]
movq 40(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[12] x A[6]
movq 48(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 8(%rdi)
movq %r8, 16(%rdi)
movq 32(%rdi), %r10
movq 40(%rdi), %r8
# A[12] x A[7]
movq 56(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[12] x A[8]
movq 64(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq 48(%rdi), %r9
# Zero into %r10
# A[12] x A[9]
movq 72(%rsi), %rdx
mulxq 96(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[12] x A[10]
movq 80(%rsi), %rdx
mulxq 96(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
# Zero into %r8
# Zero into %r9
# A[12] x A[11]
movq 88(%rsi), %rdx
mulxq 96(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 56(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 64(%rdi)
# Diagonal 6
movq 88(%rbp), %r8
# No load %r13 - %r9
# A[6] x A[5]
movq 40(%rsi), %rdx
mulxq 48(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r13
# A[7] x A[5]
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r13
adoxq %rcx, %r14
movq %r8, 88(%rbp)
# No store %r13 - %r9
# No load %r15 - %r8
# A[8] x A[5]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[9] x A[5]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r10
# No store %r15 - %r8
movq (%rdi), %r10
movq 8(%rdi), %r8
# A[10] x A[5]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r10
# A[11] x A[5]
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# No store %rbx - %r9
movq %r10, (%rdi)
movq 16(%rdi), %r9
movq 24(%rdi), %r10
# A[11] x A[6]
movq 48(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[11] x A[7]
movq 56(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[11] x A[8]
movq 64(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[11] x A[9]
movq 72(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 24(%rdi)
movq %r8, 32(%rdi)
movq 48(%rdi), %r10
movq 56(%rdi), %r8
# A[11] x A[10]
movq 80(%rsi), %rdx
mulxq 88(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[13] x A[9]
movq 72(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 40(%rdi)
movq %r10, 48(%rdi)
movq 64(%rdi), %r9
# Zero into %r10
# A[13] x A[10]
movq 80(%rsi), %rdx
mulxq 104(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[13] x A[11]
movq 88(%rsi), %rdx
mulxq 104(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 56(%rdi)
movq %r9, 64(%rdi)
# Zero into %r8
# Zero into %r9
# A[13] x A[12]
movq 96(%rsi), %rdx
mulxq 104(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 72(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 80(%rdi)
# Diagonal 7
# No load %r15 - %r9
# A[7] x A[6]
movq 48(%rsi), %rdx
mulxq 56(%rsi), %rax, %rcx
adcxq %rax, %r14
adoxq %rcx, %r15
# A[8] x A[6]
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbx
# No store %r14 - %r8
# No store %r15 - %r9
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[9] x A[6]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r8
# A[10] x A[6]
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# No store %rbx - %r10
movq %r8, (%rdi)
movq 16(%rdi), %r10
movq 24(%rdi), %r8
# A[10] x A[7]
movq 56(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[10] x A[8]
movq 64(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq 32(%rdi), %r9
movq 40(%rdi), %r10
# A[10] x A[9]
movq 72(%rsi), %rdx
mulxq 80(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[6]
movq 48(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[14] x A[7]
movq 56(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[14] x A[8]
movq 64(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 40(%rdi)
movq %r8, 48(%rdi)
movq 64(%rdi), %r10
movq 72(%rdi), %r8
# A[14] x A[9]
movq 72(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[14] x A[10]
movq 80(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 56(%rdi)
movq %r10, 64(%rdi)
movq 80(%rdi), %r9
# Zero into %r10
# A[14] x A[11]
movq 88(%rsi), %rdx
mulxq 112(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[14] x A[12]
movq 96(%rsi), %rdx
mulxq 112(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
# Zero into %r8
# Zero into %r9
# A[14] x A[13]
movq 104(%rsi), %rdx
mulxq 112(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 88(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 96(%rdi)
# Diagonal 8
movq (%rdi), %r9
movq 8(%rdi), %r10
# A[8] x A[7]
movq 56(%rsi), %rdx
mulxq 64(%rsi), %rax, %rcx
adcxq %rax, %rbx
adoxq %rcx, %r9
# A[9] x A[7]
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# No store %rbx - %r8
movq %r9, (%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[9] x A[8]
movq 64(%rsi), %rdx
mulxq 72(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[15] x A[3]
movq 24(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 8(%rdi)
movq %r8, 16(%rdi)
movq 32(%rdi), %r10
movq 40(%rdi), %r8
# A[15] x A[4]
movq 32(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] x A[5]
movq 40(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 24(%rdi)
movq %r10, 32(%rdi)
movq 48(%rdi), %r9
movq 56(%rdi), %r10
# A[15] x A[6]
movq 48(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] x A[7]
movq 56(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq 64(%rdi), %r8
movq 72(%rdi), %r9
# A[15] x A[8]
movq 64(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
# A[15] x A[9]
movq 72(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movq %r10, 56(%rdi)
movq %r8, 64(%rdi)
movq 80(%rdi), %r10
movq 88(%rdi), %r8
# A[15] x A[10]
movq 80(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
# A[15] x A[11]
movq 88(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r8
movq %r9, 72(%rdi)
movq %r10, 80(%rdi)
movq 96(%rdi), %r9
# Zero into %r10
# A[15] x A[12]
movq 96(%rsi), %rdx
mulxq 120(%rsi), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
# A[15] x A[13]
movq 104(%rsi), %rdx
mulxq 120(%rsi), %rax, %r10
adcxq %rax, %r9
adoxq %r11, %r10
movq %r8, 88(%rdi)
movq %r9, 96(%rdi)
# Zero into %r8
# Zero into %r9
# A[15] x A[14]
movq 112(%rsi), %rdx
mulxq 120(%rsi), %rax, %r8
adcxq %rax, %r10
adoxq %r11, %r8
movq %r10, 104(%rdi)
# Carry
adcxq %r12, %r8
movq %r11, %r12
adcxq %r11, %r12
adoxq %r11, %r12
movq %r8, 112(%rdi)
movq %r12, 120(%rdi)
# Double and Add in A[i] x A[i]
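# Each pair of result words is doubled on the adox (OF) chain by adding
# it to itself, then the two halves of A[i] x A[i] are added on the adcx
# (CF) chain, keeping the two carry chains independent.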
movq 8(%rbp), %r9
# A[0] x A[0]
movq (%rsi), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rbp)
adoxq %r9, %r9
adcxq %rcx, %r9
movq %r9, 8(%rbp)
movq 16(%rbp), %r8
movq 24(%rbp), %r9
# A[1] x A[1]
movq 8(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rbp)
movq %r9, 24(%rbp)
movq 32(%rbp), %r8
movq 40(%rbp), %r9
# A[2] x A[2]
movq 16(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rbp)
movq %r9, 40(%rbp)
movq 48(%rbp), %r8
movq 56(%rbp), %r9
# A[3] x A[3]
movq 24(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 48(%rbp)
movq %r9, 56(%rbp)
movq 64(%rbp), %r8
movq 72(%rbp), %r9
# A[4] x A[4]
movq 32(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 64(%rbp)
movq %r9, 72(%rbp)
movq 80(%rbp), %r8
movq 88(%rbp), %r9
# A[5] x A[5]
movq 40(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 80(%rbp)
movq %r9, 88(%rbp)
# A[6] x A[6]
movq 48(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r13, %r13
adoxq %r14, %r14
adcxq %rax, %r13
adcxq %rcx, %r14
# A[7] x A[7]
movq 56(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r15, %r15
adoxq %rbx, %rbx
adcxq %rax, %r15
adcxq %rcx, %rbx
movq (%rdi), %r8
movq 8(%rdi), %r9
# A[8] x A[8]
movq 64(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rdi), %r8
movq 24(%rdi), %r9
# A[9] x A[9]
movq 72(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdi), %r8
movq 40(%rdi), %r9
# A[10] x A[10]
movq 80(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rdi), %r8
movq 56(%rdi), %r9
# A[11] x A[11]
movq 88(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdi), %r8
movq 72(%rdi), %r9
# A[12] x A[12]
movq 96(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq 80(%rdi), %r8
movq 88(%rdi), %r9
# A[13] x A[13]
movq 104(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rdi), %r8
movq 104(%rdi), %r9
# A[14] x A[14]
movq 112(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 96(%rdi)
movq %r9, 104(%rdi)
movq 112(%rdi), %r8
movq 120(%rdi), %r9
# A[15] x A[15]
movq 120(%rsi), %rdx
mulxq %rdx, %rax, %rcx
adoxq %r8, %r8
adoxq %r9, %r9
adcxq %rax, %r8
adcxq %rcx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
movq %r13, -32(%rdi)
movq %r14, -24(%rdi)
movq %r15, -16(%rdi)
movq %rbx, -8(%rdi)
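# When squaring in place (r == a) the cached low words still live on the
# stack (%rbp); copy them back to r.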
subq $0x80, %rdi
cmpq %rdi, %rsi
jne L_end_1024_sqr_avx2_16
vmovdqu (%rbp), %xmm0
vmovups %xmm0, (%rdi)
vmovdqu 16(%rbp), %xmm0
vmovups %xmm0, 16(%rdi)
vmovdqu 32(%rbp), %xmm0
vmovups %xmm0, 32(%rdi)
vmovdqu 48(%rbp), %xmm0
vmovups %xmm0, 48(%rdi)
vmovdqu 64(%rbp), %xmm0
vmovups %xmm0, 64(%rdi)
vmovdqu 80(%rbp), %xmm0
vmovups %xmm0, 80(%rdi)
L_end_1024_sqr_avx2_16:
addq $0x80, %rsp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
repz retq
#ifndef __APPLE__
.size sp_1024_sqr_avx2_16,.-sp_1024_sqr_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Add b to a into r. (r = a + b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision integer.
*/
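/* For reference, a minimal C model of the carry chain below; a sketch
 * assuming the usual 64-bit sp_digit typedef and compiler __int128
 * support (the _ref name is illustrative, not part of this file):
 *
 *   sp_digit sp_1024_add_16_ref(sp_digit* r, const sp_digit* a,
 *                               const sp_digit* b)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 16; i++) {
 *           t += (unsigned __int128)a[i] + b[i];
 *           r[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       return (sp_digit)t;   // carry out, returned in %rax by the asm
 *   }
 */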
#ifndef __APPLE__
.text
.globl sp_1024_add_16
.type sp_1024_add_16,@function
.align 16
sp_1024_add_16:
#else
.section __TEXT,__text
.globl _sp_1024_add_16
.p2align 4
_sp_1024_add_16:
#endif /* __APPLE__ */
# Add
movq (%rsi), %rcx
xorq %rax, %rax
addq (%rdx), %rcx
movq 8(%rsi), %r8
movq %rcx, (%rdi)
adcq 8(%rdx), %r8
movq 16(%rsi), %rcx
movq %r8, 8(%rdi)
adcq 16(%rdx), %rcx
movq 24(%rsi), %r8
movq %rcx, 16(%rdi)
adcq 24(%rdx), %r8
movq 32(%rsi), %rcx
movq %r8, 24(%rdi)
adcq 32(%rdx), %rcx
movq 40(%rsi), %r8
movq %rcx, 32(%rdi)
adcq 40(%rdx), %r8
movq 48(%rsi), %rcx
movq %r8, 40(%rdi)
adcq 48(%rdx), %rcx
movq 56(%rsi), %r8
movq %rcx, 48(%rdi)
adcq 56(%rdx), %r8
movq 64(%rsi), %rcx
movq %r8, 56(%rdi)
adcq 64(%rdx), %rcx
movq 72(%rsi), %r8
movq %rcx, 64(%rdi)
adcq 72(%rdx), %r8
movq 80(%rsi), %rcx
movq %r8, 72(%rdi)
adcq 80(%rdx), %rcx
movq 88(%rsi), %r8
movq %rcx, 80(%rdi)
adcq 88(%rdx), %r8
movq 96(%rsi), %rcx
movq %r8, 88(%rdi)
adcq 96(%rdx), %rcx
movq 104(%rsi), %r8
movq %rcx, 96(%rdi)
adcq 104(%rdx), %r8
movq 112(%rsi), %rcx
movq %r8, 104(%rdi)
adcq 112(%rdx), %rcx
movq 120(%rsi), %r8
movq %rcx, 112(%rdi)
adcq 120(%rdx), %r8
movq %r8, 120(%rdi)
adcq $0x00, %rax
repz retq
#ifndef __APPLE__
.size sp_1024_add_16,.-sp_1024_add_16
#endif /* __APPLE__ */
/* Sub b from a into a. (a -= b)
*
* a A single precision integer and result.
* b A single precision integer.
*/
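/* A rough C model of the in-place borrow chain (same assumptions as the
 * sketch above sp_1024_add_16: 64-bit sp_digit, __int128 support):
 *
 *   sp_digit sp_1024_sub_in_place_16_ref(sp_digit* a, const sp_digit* b)
 *   {
 *       __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 16; i++) {
 *           t += (__int128)a[i] - b[i];
 *           a[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       return (sp_digit)t;   // 0, or all ones when the subtract borrowed
 *   }
 */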
#ifndef __APPLE__
.text
.globl sp_1024_sub_in_place_16
.type sp_1024_sub_in_place_16,@function
.align 16
sp_1024_sub_in_place_16:
#else
.section __TEXT,__text
.globl _sp_1024_sub_in_place_16
.p2align 4
_sp_1024_sub_in_place_16:
#endif /* __APPLE__ */
movq (%rdi), %rdx
subq (%rsi), %rdx
movq 8(%rdi), %rcx
movq %rdx, (%rdi)
sbbq 8(%rsi), %rcx
movq 16(%rdi), %rdx
movq %rcx, 8(%rdi)
sbbq 16(%rsi), %rdx
movq 24(%rdi), %rcx
movq %rdx, 16(%rdi)
sbbq 24(%rsi), %rcx
movq 32(%rdi), %rdx
movq %rcx, 24(%rdi)
sbbq 32(%rsi), %rdx
movq 40(%rdi), %rcx
movq %rdx, 32(%rdi)
sbbq 40(%rsi), %rcx
movq 48(%rdi), %rdx
movq %rcx, 40(%rdi)
sbbq 48(%rsi), %rdx
movq 56(%rdi), %rcx
movq %rdx, 48(%rdi)
sbbq 56(%rsi), %rcx
movq 64(%rdi), %rdx
movq %rcx, 56(%rdi)
sbbq 64(%rsi), %rdx
movq 72(%rdi), %rcx
movq %rdx, 64(%rdi)
sbbq 72(%rsi), %rcx
movq 80(%rdi), %rdx
movq %rcx, 72(%rdi)
sbbq 80(%rsi), %rdx
movq 88(%rdi), %rcx
movq %rdx, 80(%rdi)
sbbq 88(%rsi), %rcx
movq 96(%rdi), %rdx
movq %rcx, 88(%rdi)
sbbq 96(%rsi), %rdx
movq 104(%rdi), %rcx
movq %rdx, 96(%rdi)
sbbq 104(%rsi), %rcx
movq 112(%rdi), %rdx
movq %rcx, 104(%rdi)
sbbq 112(%rsi), %rdx
movq 120(%rdi), %rcx
movq %rdx, 112(%rdi)
sbbq 120(%rsi), %rcx
movq %rcx, 120(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_1024_sub_in_place_16,.-sp_1024_sub_in_place_16
#endif /* __APPLE__ */
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
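/* The mask is applied up front: each word of b is ANDed with m into a
 * stack copy, so the identical borrow chain runs whichever value m has;
 * per word this computes r[i] = a[i] - (b[i] & m) in constant time.
 */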
#ifndef __APPLE__
.text
.globl sp_1024_cond_sub_16
.type sp_1024_cond_sub_16,@function
.align 16
sp_1024_cond_sub_16:
#else
.section __TEXT,__text
.globl _sp_1024_cond_sub_16
.p2align 4
_sp_1024_cond_sub_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %rcx, %r8
andq %rcx, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rsi), %r8
movq (%rsp), %rdx
subq %rdx, %r8
movq 8(%rsi), %r9
movq 8(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, (%rdi)
movq 16(%rsi), %r8
movq 16(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 8(%rdi)
movq 24(%rsi), %r9
movq 24(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 16(%rdi)
movq 32(%rsi), %r8
movq 32(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 24(%rdi)
movq 40(%rsi), %r9
movq 40(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 32(%rdi)
movq 48(%rsi), %r8
movq 48(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 40(%rdi)
movq 56(%rsi), %r9
movq 56(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 48(%rdi)
movq 64(%rsi), %r8
movq 64(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 56(%rdi)
movq 72(%rsi), %r9
movq 72(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 64(%rdi)
movq 80(%rsi), %r8
movq 80(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 72(%rdi)
movq 88(%rsi), %r9
movq 88(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 80(%rdi)
movq 96(%rsi), %r8
movq 96(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 88(%rdi)
movq 104(%rsi), %r9
movq 104(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 96(%rdi)
movq 112(%rsi), %r8
movq 112(%rsp), %rdx
sbbq %rdx, %r8
movq %r9, 104(%rdi)
movq 120(%rsi), %r9
movq 120(%rsp), %rdx
sbbq %rdx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq %rax, %rax
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_cond_sub_16,.-sp_1024_cond_sub_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Conditionally subtract b from a using the mask m.
 * m is -1 to subtract and 0 when not subtracting.
 *
 * r A single precision number representing the conditional subtract result.
* a A single precision number to subtract from.
* b A single precision number to subtract.
* m Mask value to apply.
*/
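/* Here BMI2 pextq stands in for the AND mask: pextq with an all-ones
 * mask returns the word of b unchanged and with a zero mask returns 0,
 * so b is conditionally zeroed register-by-register, with no stack copy.
 */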
#ifndef __APPLE__
.text
.globl sp_1024_cond_sub_avx2_16
.type sp_1024_cond_sub_avx2_16,@function
.align 16
sp_1024_cond_sub_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_cond_sub_avx2_16
.p2align 4
_sp_1024_cond_sub_avx2_16:
#endif /* __APPLE__ */
movq (%rdx), %r10
movq (%rsi), %r8
pextq %rcx, %r10, %r10
subq %r10, %r8
movq 8(%rdx), %r10
movq 8(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, (%rdi)
sbbq %r10, %r9
movq 16(%rdx), %r8
movq 16(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 8(%rdi)
sbbq %r8, %r10
movq 24(%rdx), %r9
movq 24(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 16(%rdi)
sbbq %r9, %r8
movq 32(%rdx), %r10
movq 32(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 24(%rdi)
sbbq %r10, %r9
movq 40(%rdx), %r8
movq 40(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 32(%rdi)
sbbq %r8, %r10
movq 48(%rdx), %r9
movq 48(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 40(%rdi)
sbbq %r9, %r8
movq 56(%rdx), %r10
movq 56(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 48(%rdi)
sbbq %r10, %r9
movq 64(%rdx), %r8
movq 64(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 56(%rdi)
sbbq %r8, %r10
movq 72(%rdx), %r9
movq 72(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 64(%rdi)
sbbq %r9, %r8
movq 80(%rdx), %r10
movq 80(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 72(%rdi)
sbbq %r10, %r9
movq 88(%rdx), %r8
movq 88(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 80(%rdi)
sbbq %r8, %r10
movq 96(%rdx), %r9
movq 96(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 88(%rdi)
sbbq %r9, %r8
movq 104(%rdx), %r10
movq 104(%rsi), %r9
pextq %rcx, %r10, %r10
movq %r8, 96(%rdi)
sbbq %r10, %r9
movq 112(%rdx), %r8
movq 112(%rsi), %r10
pextq %rcx, %r8, %r8
movq %r9, 104(%rdi)
sbbq %r8, %r10
movq 120(%rdx), %r9
movq 120(%rsi), %r8
pextq %rcx, %r9, %r9
movq %r10, 112(%rdi)
sbbq %r9, %r8
movq %r8, 120(%rdi)
sbbq %rax, %rax
repz retq
#ifndef __APPLE__
.size sp_1024_cond_sub_avx2_16,.-sp_1024_cond_sub_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
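/* A rough C model (same assumptions as the earlier sketches: 64-bit
 * sp_digit, __int128 support; the _ref name is illustrative):
 *
 *   void sp_1024_mul_d_16_ref(sp_digit* r, const sp_digit* a, sp_digit b)
 *   {
 *       unsigned __int128 t = 0;
 *       int i;
 *       for (i = 0; i < 16; i++) {
 *           t += (unsigned __int128)a[i] * b;
 *           r[i] = (sp_digit)t;
 *           t >>= 64;
 *       }
 *       r[16] = (sp_digit)t;  // the result is 17 words wide
 *   }
 */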
#ifndef __APPLE__
.text
.globl sp_1024_mul_d_16
.type sp_1024_mul_d_16,@function
.align 16
sp_1024_mul_d_16:
#else
.section __TEXT,__text
.globl _sp_1024_mul_d_16
.p2align 4
_sp_1024_mul_d_16:
#endif /* __APPLE__ */
movq %rdx, %rcx
# A[0] * B
movq %rcx, %rax
xorq %r10, %r10
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
movq %r8, (%rdi)
# A[1] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 8(%rsi)
addq %rax, %r9
movq %r9, 8(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[2] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 16(%rsi)
addq %rax, %r10
movq %r10, 16(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[3] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 24(%rsi)
addq %rax, %r8
movq %r8, 24(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[4] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 32(%rsi)
addq %rax, %r9
movq %r9, 32(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[5] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 40(%rsi)
addq %rax, %r10
movq %r10, 40(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[6] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 48(%rsi)
addq %rax, %r8
movq %r8, 48(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[7] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 56(%rsi)
addq %rax, %r9
movq %r9, 56(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[8] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 64(%rsi)
addq %rax, %r10
movq %r10, 64(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[9] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 72(%rsi)
addq %rax, %r8
movq %r8, 72(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[10] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 80(%rsi)
addq %rax, %r9
movq %r9, 80(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[11] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 88(%rsi)
addq %rax, %r10
movq %r10, 88(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[12] * B
movq %rcx, %rax
xorq %r10, %r10
mulq 96(%rsi)
addq %rax, %r8
movq %r8, 96(%rdi)
adcq %rdx, %r9
adcq $0x00, %r10
# A[13] * B
movq %rcx, %rax
xorq %r8, %r8
mulq 104(%rsi)
addq %rax, %r9
movq %r9, 104(%rdi)
adcq %rdx, %r10
adcq $0x00, %r8
# A[14] * B
movq %rcx, %rax
xorq %r9, %r9
mulq 112(%rsi)
addq %rax, %r10
movq %r10, 112(%rdi)
adcq %rdx, %r8
adcq $0x00, %r9
# A[15] * B
movq %rcx, %rax
mulq 120(%rsi)
addq %rax, %r8
adcq %rdx, %r9
movq %r8, 120(%rdi)
movq %r9, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mul_d_16,.-sp_1024_mul_d_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Mul a by digit b into r. (r = a * b)
*
* r A single precision integer.
* a A single precision integer.
* b A single precision digit.
*/
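/* Same computation as sp_1024_mul_d_16 above, but mulx produces each
 * 128-bit partial product without touching flags, so the adcx (CF) and
 * adox (OF) carry chains can alternate between the two accumulators.
 */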
#ifndef __APPLE__
.text
.globl sp_1024_mul_d_avx2_16
.type sp_1024_mul_d_avx2_16,@function
.align 16
sp_1024_mul_d_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mul_d_avx2_16
.p2align 4
_sp_1024_mul_d_avx2_16:
#endif /* __APPLE__ */
movq %rdx, %rax
# A[0] * B
movq %rax, %rdx
xorq %r11, %r11
mulxq (%rsi), %r9, %r10
movq %r9, (%rdi)
# A[1] * B
mulxq 8(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 8(%rdi)
# A[2] * B
mulxq 16(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 16(%rdi)
# A[3] * B
mulxq 24(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 24(%rdi)
# A[4] * B
mulxq 32(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 32(%rdi)
# A[5] * B
mulxq 40(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 40(%rdi)
# A[6] * B
mulxq 48(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 48(%rdi)
# A[7] * B
mulxq 56(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 56(%rdi)
# A[8] * B
mulxq 64(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 64(%rdi)
# A[9] * B
mulxq 72(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 72(%rdi)
# A[10] * B
mulxq 80(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 80(%rdi)
# A[11] * B
mulxq 88(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 88(%rdi)
# A[12] * B
mulxq 96(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 96(%rdi)
# A[13] * B
mulxq 104(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
movq %r10, 104(%rdi)
# A[14] * B
mulxq 112(%rsi), %rcx, %r8
movq %r11, %r10
adcxq %rcx, %r9
adoxq %r8, %r10
movq %r9, 112(%rdi)
# A[15] * B
mulxq 120(%rsi), %rcx, %r8
movq %r11, %r9
adcxq %rcx, %r10
adoxq %r8, %r9
adcxq %r11, %r9
movq %r10, 120(%rdi)
movq %r9, 128(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mul_d_avx2_16,.-sp_1024_mul_d_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef _WIN64
/* Divide the double width number (d1|d0) by the divisor. (d1|d0 / div)
 *
 * d1 The high order half of the number to divide.
 * d0 The low order half of the number to divide.
 * div The divisor.
 * returns the result of the division.
*/
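/* Equivalent C, assuming __int128 support and that the quotient fits in
 * 64 bits (divq raises #DE otherwise); the _ref name is illustrative:
 *
 *   sp_digit div_1024_word_16_ref(sp_digit d1, sp_digit d0, sp_digit div)
 *   {
 *       return (sp_digit)((((unsigned __int128)d1 << 64) | d0) / div);
 *   }
 */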
#ifndef __APPLE__
.text
.globl div_1024_word_asm_16
.type div_1024_word_asm_16,@function
.align 16
div_1024_word_asm_16:
#else
.section __TEXT,__text
.globl _div_1024_word_asm_16
.p2align 4
_div_1024_word_asm_16:
#endif /* __APPLE__ */
movq %rdx, %rcx
movq %rsi, %rax
movq %rdi, %rdx
divq %rcx
repz retq
#ifndef __APPLE__
.size div_1024_word_asm_16,.-div_1024_word_asm_16
#endif /* __APPLE__ */
#endif /* _WIN64 */
/* Compare a with b in constant time.
*
* a A single precision integer.
* b A single precision integer.
* return -ve, 0 or +ve if a is less than, equal to or greater than b
* respectively.
*/
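/* The words are compared from most significant to least with no
 * data-dependent branches: %rdx starts as an all-ones mask and is cleared
 * by cmovnz as soon as a word pair differs, so every later iteration
 * compares masked zeros and leaves %rax untouched; the final xor maps the
 * still-undecided (all words equal) case to 0.
 */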
#ifndef __APPLE__
.text
.globl sp_1024_cmp_16
.type sp_1024_cmp_16,@function
.align 16
sp_1024_cmp_16:
#else
.section __TEXT,__text
.globl _sp_1024_cmp_16
.p2align 4
_sp_1024_cmp_16:
#endif /* __APPLE__ */
xorq %rcx, %rcx
movq $-1, %rdx
movq $-1, %rax
movq $0x01, %r8
movq 120(%rdi), %r9
movq 120(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 112(%rdi), %r9
movq 112(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 104(%rdi), %r9
movq 104(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 96(%rdi), %r9
movq 96(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 88(%rdi), %r9
movq 88(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 80(%rdi), %r9
movq 80(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 72(%rdi), %r9
movq 72(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 64(%rdi), %r9
movq 64(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 56(%rdi), %r9
movq 56(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 48(%rdi), %r9
movq 48(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 40(%rdi), %r9
movq 40(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 32(%rdi), %r9
movq 32(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 24(%rdi), %r9
movq 24(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 16(%rdi), %r9
movq 16(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq 8(%rdi), %r9
movq 8(%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
movq (%rdi), %r9
movq (%rsi), %r10
andq %rdx, %r9
andq %rdx, %r10
subq %r10, %r9
cmova %r8, %rax
cmovc %rdx, %rax
cmovnz %rcx, %rdx
xorq %rdx, %rax
repz retq
#ifndef __APPLE__
.size sp_1024_cmp_16,.-sp_1024_cmp_16
#endif /* __APPLE__ */
/* Conditionally copy a into r using the mask m.
* m is -1 to copy and 0 when not.
*
* r A single precision number to copy over.
* a A single precision number to copy.
* m Mask value to apply.
*/
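/* Each block of four words computes the branch-free select
 * r[i] ^= (r[i] ^ a[i]) & m: with m == -1 the stores produce a[i], and
 * with m == 0 they rewrite r[i] unchanged.
 */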
#ifndef __APPLE__
.text
.globl sp_1024_cond_copy_16
.type sp_1024_cond_copy_16,@function
.align 16
sp_1024_cond_copy_16:
#else
.section __TEXT,__text
.globl _sp_1024_cond_copy_16
.p2align 4
_sp_1024_cond_copy_16:
#endif /* __APPLE__ */
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
xorq (%rsi), %rax
xorq 8(%rsi), %rcx
xorq 16(%rsi), %r8
xorq 24(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, (%rdi)
xorq %rcx, 8(%rdi)
xorq %r8, 16(%rdi)
xorq %r9, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
movq 48(%rdi), %r8
movq 56(%rdi), %r9
xorq 32(%rsi), %rax
xorq 40(%rsi), %rcx
xorq 48(%rsi), %r8
xorq 56(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, 32(%rdi)
xorq %rcx, 40(%rdi)
xorq %r8, 48(%rdi)
xorq %r9, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
movq 80(%rdi), %r8
movq 88(%rdi), %r9
xorq 64(%rsi), %rax
xorq 72(%rsi), %rcx
xorq 80(%rsi), %r8
xorq 88(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, 64(%rdi)
xorq %rcx, 72(%rdi)
xorq %r8, 80(%rdi)
xorq %r9, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
movq 112(%rdi), %r8
movq 120(%rdi), %r9
xorq 96(%rsi), %rax
xorq 104(%rsi), %rcx
xorq 112(%rsi), %r8
xorq 120(%rsi), %r9
andq %rdx, %rax
andq %rdx, %rcx
andq %rdx, %r8
andq %rdx, %r9
xorq %rax, 96(%rdi)
xorq %rcx, 104(%rdi)
xorq %r8, 112(%rdi)
xorq %r9, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_cond_copy_16,.-sp_1024_cond_copy_16
#endif /* __APPLE__ */
/* Reduce the number back to 1024 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
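/* Reference sketch (illustrative only): the unrolled loop below follows
 * the classic word-by-word Montgomery reduction.  The C outline, assuming
 * 64-bit limbs and a compiler with __int128, is roughly:
 *
 *     void mont_reduce_16(uint64_t* a, const uint64_t* m, uint64_t mp)
 *     {
 *         int i, j;
 *         for (i = 0; i < 16; i++) {
 *             uint64_t mu = a[i] * mp;   // low 64 bits only
 *             uint64_t c = 0;
 *             for (j = 0; j < 16; j++) {
 *                 unsigned __int128 t = (unsigned __int128)mu * m[j]
 *                                     + a[i + j] + c;
 *                 a[i + j] = (uint64_t)t;
 *                 c = (uint64_t)(t >> 64);
 *             }
 *             a[i + 16] += c;            // top-word carry kept in %r15
 *         }
 *         // then subtract m from a[16..31] in constant time when needed
 *     }
 */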
#ifndef __APPLE__
.text
.globl sp_1024_mont_reduce_16
.type sp_1024_mont_reduce_16,@function
.align 16
sp_1024_mont_reduce_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_reduce_16
.p2align 4
_sp_1024_mont_reduce_16:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rdx, %rcx
xorq %r15, %r15
# i = 16
movq $16, %r8
movq (%rdi), %r13
movq 8(%rdi), %r14
L_1024_mont_reduce_16_loop:
# mu = a[i] * mp
movq %r13, %r11
imulq %rcx, %r11
# a[i+0] += m[0] * mu
movq %r11, %rax
xorq %r10, %r10
mulq (%rsi)
addq %rax, %r13
adcq %rdx, %r10
# a[i+1] += m[1] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 8(%rsi)
movq %r14, %r13
addq %rax, %r13
adcq %rdx, %r9
addq %r10, %r13
adcq $0x00, %r9
# a[i+2] += m[2] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 16(%rsi)
movq 16(%rdi), %r14
addq %rax, %r14
adcq %rdx, %r10
addq %r9, %r14
adcq $0x00, %r10
# a[i+3] += m[3] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 24(%rsi)
movq 24(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 24(%rdi)
adcq $0x00, %r9
# a[i+4] += m[4] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 32(%rsi)
movq 32(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 32(%rdi)
adcq $0x00, %r10
# a[i+5] += m[5] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 40(%rsi)
movq 40(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 40(%rdi)
adcq $0x00, %r9
# a[i+6] += m[6] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 48(%rsi)
movq 48(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 48(%rdi)
adcq $0x00, %r10
# a[i+7] += m[7] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 56(%rsi)
movq 56(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 56(%rdi)
adcq $0x00, %r9
# a[i+8] += m[8] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 64(%rsi)
movq 64(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 64(%rdi)
adcq $0x00, %r10
# a[i+9] += m[9] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 72(%rsi)
movq 72(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 72(%rdi)
adcq $0x00, %r9
# a[i+10] += m[10] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 80(%rsi)
movq 80(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 80(%rdi)
adcq $0x00, %r10
# a[i+11] += m[11] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 88(%rsi)
movq 88(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 88(%rdi)
adcq $0x00, %r9
# a[i+12] += m[12] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 96(%rsi)
movq 96(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 96(%rdi)
adcq $0x00, %r10
# a[i+13] += m[13] * mu
movq %r11, %rax
xorq %r9, %r9
mulq 104(%rsi)
movq 104(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r9
addq %r10, %r12
movq %r12, 104(%rdi)
adcq $0x00, %r9
# a[i+14] += m[14] * mu
movq %r11, %rax
xorq %r10, %r10
mulq 112(%rsi)
movq 112(%rdi), %r12
addq %rax, %r12
adcq %rdx, %r10
addq %r9, %r12
movq %r12, 112(%rdi)
adcq $0x00, %r10
# a[i+15] += m[15] * mu
movq %r11, %rax
mulq 120(%rsi)
movq 120(%rdi), %r12
addq %rax, %r10
adcq %r15, %rdx
movq $0x00, %r15
adcq $0x00, %r15
addq %r10, %r12
movq %r12, 120(%rdi)
adcq %rdx, 128(%rdi)
adcq $0x00, %r15
# i -= 1
addq $8, %rdi
decq %r8
jnz L_1024_mont_reduce_16_loop
movq 120(%rdi), %r12
movq %r13, (%rdi)
subq 120(%rsi), %r12
movq %r14, 8(%rdi)
sbbq %r12, %r12
negq %r15
notq %r12
orq %r12, %r15
#ifdef _WIN64
movq %rsi, %rdx
movq %r15, %rcx
#else
movq %r15, %rcx
movq %rsi, %rdx
#endif /* _WIN64 */
movq %rdi, %rsi
movq %rdi, %rdi
subq $0x80, %rdi
#ifndef __APPLE__
callq sp_1024_cond_sub_16@plt
#else
callq _sp_1024_cond_sub_16
#endif /* __APPLE__ */
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_1024_mont_reduce_16,.-sp_1024_mont_reduce_16
#endif /* __APPLE__ */
/* Add two Montgomery form numbers (r = (a + b) % m).
*
* r Result of addition.
* a First number to add in Montgomery form.
* b Second number to add in Montgomery form.
* m Modulus (prime).
*/
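/* Reference sketch (illustrative only): the addition below never branches
 * on the data.  A mask is derived from the add carry and a top-word
 * comparison (sufficient for this modulus), and the masked modulus is
 * then subtracted unconditionally; helper names here are hypothetical:
 *
 *     carry = add_16(r, a, b);                 // r = a + b
 *     mask  = (carry || r[15] >= m[15]) ? ~0ull : 0;
 *     for (i = 0; i < 16; i++) t[i] = m[i] & mask;
 *     sub_16(r, r, t);                         // r -= m when mask is set
 */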
#ifndef __APPLE__
.text
.globl sp_1024_mont_add_16
.type sp_1024_mont_add_16,@function
.align 16
sp_1024_mont_add_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_add_16
.p2align 4
_sp_1024_mont_add_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rsi), %rax
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
addq (%rdx), %rax
movq $0x00, %r11
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %r8
movq 48(%rsi), %r9
movq 56(%rsi), %r10
adcq 32(%rdx), %rax
adcq 40(%rdx), %r8
adcq 48(%rdx), %r9
adcq 56(%rdx), %r10
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq %r10, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %r8
movq 80(%rsi), %r9
movq 88(%rsi), %r10
adcq 64(%rdx), %rax
adcq 72(%rdx), %r8
adcq 80(%rdx), %r9
adcq 88(%rdx), %r10
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %r8
movq 112(%rsi), %r9
movq 120(%rsi), %r10
adcq 96(%rdx), %rax
adcq 104(%rdx), %r8
adcq 112(%rdx), %r9
adcq 120(%rdx), %r10
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq %r9, 112(%rdi)
movq %r10, 120(%rdi)
sbbq $0x00, %r11
subq 120(%rcx), %r10
sbbq %r10, %r10
notq %r10
orq %r10, %r11
movq (%rcx), %r9
movq 8(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq 16(%rcx), %r9
movq 24(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 16(%rsp)
movq %r10, 24(%rsp)
movq 32(%rcx), %r9
movq 40(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq 48(%rcx), %r9
movq 56(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 48(%rsp)
movq %r10, 56(%rsp)
movq 64(%rcx), %r9
movq 72(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq 80(%rcx), %r9
movq 88(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 80(%rsp)
movq %r10, 88(%rsp)
movq 96(%rcx), %r9
movq 104(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 96(%rsp)
movq %r10, 104(%rsp)
movq 112(%rcx), %r9
movq 120(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 112(%rsp)
movq %r10, 120(%rsp)
movq (%rdi), %rax
movq 8(%rdi), %r8
subq (%rsp), %rax
sbbq 8(%rsp), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 16(%rdi), %rax
movq 24(%rdi), %r8
sbbq 16(%rsp), %rax
sbbq 24(%rsp), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %r8
sbbq 32(%rsp), %rax
sbbq 40(%rsp), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 48(%rdi), %rax
movq 56(%rdi), %r8
sbbq 48(%rsp), %rax
sbbq 56(%rsp), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %r8
sbbq 64(%rsp), %rax
sbbq 72(%rsp), %r8
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rdi), %rax
movq 88(%rdi), %r8
sbbq 80(%rsp), %rax
sbbq 88(%rsp), %r8
movq %rax, 80(%rdi)
movq %r8, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %r8
sbbq 96(%rsp), %rax
sbbq 104(%rsp), %r8
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq 112(%rdi), %rax
movq 120(%rdi), %r8
sbbq 112(%rsp), %rax
sbbq 120(%rsp), %r8
movq %rax, 112(%rdi)
movq %r8, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mont_add_16,.-sp_1024_mont_add_16
#endif /* __APPLE__ */
/* Double a Montgomery form number (r = (a + a) % m).
 *
 * r Result of the doubling.
 * a Number to double in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_dbl_16
.type sp_1024_mont_dbl_16,@function
.align 16
sp_1024_mont_dbl_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_dbl_16
.p2align 4
_sp_1024_mont_dbl_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %rcx
movq 80(%rsi), %r8
movq 88(%rsi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %rcx
movq 112(%rsi), %r8
movq 120(%rsi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rdi), %rax
movq 8(%rdi), %rcx
subq (%rsp), %rax
sbbq 8(%rsp), %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
sbbq 16(%rsp), %rax
sbbq 24(%rsp), %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
sbbq 32(%rsp), %rax
sbbq 40(%rsp), %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
sbbq 48(%rsp), %rax
sbbq 56(%rsp), %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
sbbq 64(%rsp), %rax
sbbq 72(%rsp), %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
sbbq 80(%rsp), %rax
sbbq 88(%rsp), %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
sbbq 96(%rsp), %rax
sbbq 104(%rsp), %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
sbbq 112(%rsp), %rax
sbbq 120(%rsp), %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mont_dbl_16,.-sp_1024_mont_dbl_16
#endif /* __APPLE__ */
/* Triple a Montgomery form number (r = (a + a + a) % m).
 *
 * r Result of the tripling.
 * a Number to triple in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_tpl_16
.type sp_1024_mont_tpl_16,@function
.align 16
sp_1024_mont_tpl_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_tpl_16
.p2align 4
_sp_1024_mont_tpl_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %rcx
movq 80(%rsi), %r8
movq 88(%rsi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %rcx
movq 112(%rsi), %r8
movq 120(%rsi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rdi), %rax
movq 8(%rdi), %rcx
subq (%rsp), %rax
sbbq 8(%rsp), %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
sbbq 16(%rsp), %rax
sbbq 24(%rsp), %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
sbbq 32(%rsp), %rax
sbbq 40(%rsp), %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
sbbq 48(%rsp), %rax
sbbq 56(%rsp), %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
sbbq 64(%rsp), %rax
sbbq 72(%rsp), %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
sbbq 80(%rsp), %rax
sbbq 88(%rsp), %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
sbbq 96(%rsp), %rax
sbbq 104(%rsp), %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
sbbq 112(%rsp), %rax
sbbq 120(%rsp), %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
movq 48(%rdi), %r8
movq 56(%rdi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
movq 80(%rdi), %r8
movq 88(%rdi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
movq 112(%rdi), %r8
movq 120(%rdi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 64(%rsp)
movq %r9, 72(%rsp)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 80(%rsp)
movq %r9, 88(%rsp)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 96(%rsp)
movq %r9, 104(%rsp)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
andq %r10, %r8
andq %r10, %r9
movq %r8, 112(%rsp)
movq %r9, 120(%rsp)
movq (%rdi), %rax
movq 8(%rdi), %rcx
subq (%rsp), %rax
sbbq 8(%rsp), %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
sbbq 16(%rsp), %rax
sbbq 24(%rsp), %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
sbbq 32(%rsp), %rax
sbbq 40(%rsp), %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
sbbq 48(%rsp), %rax
sbbq 56(%rsp), %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
sbbq 64(%rsp), %rax
sbbq 72(%rsp), %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
sbbq 80(%rsp), %rax
sbbq 88(%rsp), %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
sbbq 96(%rsp), %rax
sbbq 104(%rsp), %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
sbbq 112(%rsp), %rax
sbbq 120(%rsp), %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mont_tpl_16,.-sp_1024_mont_tpl_16
#endif /* __APPLE__ */
/* Subtract two Montgomery form numbers (r = (a - b) % m).
 *
 * r Result of the subtraction.
 * a Number to subtract from, in Montgomery form.
 * b Number to subtract, in Montgomery form.
* m Modulus (prime).
*/
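/* Reference sketch (illustrative only): the subtraction below adds the
 * masked modulus back when a borrow occurred, keeping a single code path;
 * helper names are hypothetical:
 *
 *     borrow = sub_16(r, a, b);                // r = a - b
 *     mask   = borrow ? ~0ull : 0;
 *     for (i = 0; i < 16; i++) t[i] = m[i] & mask;
 *     add_16(r, r, t);                         // r += m iff a < b
 */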
#ifndef __APPLE__
.text
.globl sp_1024_mont_sub_16
.type sp_1024_mont_sub_16,@function
.align 16
sp_1024_mont_sub_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_sub_16
.p2align 4
_sp_1024_mont_sub_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rsi), %rax
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
subq (%rdx), %rax
movq $0x00, %r11
sbbq 8(%rdx), %r8
sbbq 16(%rdx), %r9
sbbq 24(%rdx), %r10
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %r8
movq 48(%rsi), %r9
movq 56(%rsi), %r10
sbbq 32(%rdx), %rax
sbbq 40(%rdx), %r8
sbbq 48(%rdx), %r9
sbbq 56(%rdx), %r10
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq %r10, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %r8
movq 80(%rsi), %r9
movq 88(%rsi), %r10
sbbq 64(%rdx), %rax
sbbq 72(%rdx), %r8
sbbq 80(%rdx), %r9
sbbq 88(%rdx), %r10
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %r8
movq 112(%rsi), %r9
movq 120(%rsi), %r10
sbbq 96(%rdx), %rax
sbbq 104(%rdx), %r8
sbbq 112(%rdx), %r9
sbbq 120(%rdx), %r10
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq %r9, 112(%rdi)
movq %r10, 120(%rdi)
sbbq $0x00, %r11
movq (%rcx), %r9
movq 8(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, (%rsp)
movq %r10, 8(%rsp)
movq 16(%rcx), %r9
movq 24(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 16(%rsp)
movq %r10, 24(%rsp)
movq 32(%rcx), %r9
movq 40(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 32(%rsp)
movq %r10, 40(%rsp)
movq 48(%rcx), %r9
movq 56(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 48(%rsp)
movq %r10, 56(%rsp)
movq 64(%rcx), %r9
movq 72(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq 80(%rcx), %r9
movq 88(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 80(%rsp)
movq %r10, 88(%rsp)
movq 96(%rcx), %r9
movq 104(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 96(%rsp)
movq %r10, 104(%rsp)
movq 112(%rcx), %r9
movq 120(%rcx), %r10
andq %r11, %r9
andq %r11, %r10
movq %r9, 112(%rsp)
movq %r10, 120(%rsp)
movq (%rdi), %rax
movq 8(%rdi), %r8
addq (%rsp), %rax
adcq 8(%rsp), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 16(%rdi), %rax
movq 24(%rdi), %r8
adcq 16(%rsp), %rax
adcq 24(%rsp), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %r8
adcq 32(%rsp), %rax
adcq 40(%rsp), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 48(%rdi), %rax
movq 56(%rdi), %r8
adcq 48(%rsp), %rax
adcq 56(%rsp), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %r8
adcq 64(%rsp), %rax
adcq 72(%rsp), %r8
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rdi), %rax
movq 88(%rdi), %r8
adcq 80(%rsp), %rax
adcq 88(%rsp), %r8
movq %rax, 80(%rdi)
movq %r8, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %r8
adcq 96(%rsp), %rax
adcq 104(%rsp), %r8
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq 112(%rdi), %rax
movq 120(%rdi), %r8
adcq 112(%rsp), %rax
adcq 120(%rsp), %r8
movq %rax, 112(%rdi)
movq %r8, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mont_sub_16,.-sp_1024_mont_sub_16
#endif /* __APPLE__ */
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
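/* Reference sketch (illustrative only): division by 2 in the field adds
 * the modulus first when the input is odd, so the low bit is always clear
 * before the shift; the carry out of the add supplies the new top bit:
 *
 *     mask  = 0 - (a[0] & 1);            // all ones iff a is odd
 *     carry = add_16(t, a, m & mask);    // t = a (+ m if odd); t is even
 *     r     = t >> 1;                    // carry shifts in as bit 1023
 */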
#ifndef __APPLE__
.text
.globl sp_1024_mont_div2_16
.type sp_1024_mont_div2_16,@function
.align 16
sp_1024_mont_div2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_div2_16
.p2align 4
_sp_1024_mont_div2_16:
#endif /* __APPLE__ */
subq $0x80, %rsp
movq (%rsi), %r11
xorq %r10, %r10
movq %r11, %rax
andq $0x01, %r11
negq %r11
movq (%rdx), %r8
andq %r11, %r8
movq %r8, (%rsp)
movq 8(%rdx), %r8
andq %r11, %r8
movq %r8, 8(%rsp)
movq 16(%rdx), %r8
andq %r11, %r8
movq %r8, 16(%rsp)
movq 24(%rdx), %r8
andq %r11, %r8
movq %r8, 24(%rsp)
movq 32(%rdx), %r8
andq %r11, %r8
movq %r8, 32(%rsp)
movq 40(%rdx), %r8
andq %r11, %r8
movq %r8, 40(%rsp)
movq 48(%rdx), %r8
andq %r11, %r8
movq %r8, 48(%rsp)
movq 56(%rdx), %r8
andq %r11, %r8
movq %r8, 56(%rsp)
movq 64(%rdx), %r8
andq %r11, %r8
movq %r8, 64(%rsp)
movq 72(%rdx), %r8
andq %r11, %r8
movq %r8, 72(%rsp)
movq 80(%rdx), %r8
andq %r11, %r8
movq %r8, 80(%rsp)
movq 88(%rdx), %r8
andq %r11, %r8
movq %r8, 88(%rsp)
movq 96(%rdx), %r8
andq %r11, %r8
movq %r8, 96(%rsp)
movq 104(%rdx), %r8
andq %r11, %r8
movq %r8, 104(%rsp)
movq 112(%rdx), %r8
andq %r11, %r8
movq %r8, 112(%rsp)
movq 120(%rdx), %r8
andq %r11, %r8
movq %r8, 120(%rsp)
addq %rax, (%rsp)
movq 8(%rsi), %rax
adcq %rax, 8(%rsp)
movq 16(%rsi), %rax
adcq %rax, 16(%rsp)
movq 24(%rsi), %rax
adcq %rax, 24(%rsp)
movq 32(%rsi), %rax
adcq %rax, 32(%rsp)
movq 40(%rsi), %rax
adcq %rax, 40(%rsp)
movq 48(%rsi), %rax
adcq %rax, 48(%rsp)
movq 56(%rsi), %rax
adcq %rax, 56(%rsp)
movq 64(%rsi), %rax
adcq %rax, 64(%rsp)
movq 72(%rsi), %rax
adcq %rax, 72(%rsp)
movq 80(%rsi), %rax
adcq %rax, 80(%rsp)
movq 88(%rsi), %rax
adcq %rax, 88(%rsp)
movq 96(%rsi), %rax
adcq %rax, 96(%rsp)
movq 104(%rsi), %rax
adcq %rax, 104(%rsp)
movq 112(%rsi), %rax
adcq %rax, 112(%rsp)
movq 120(%rsi), %rax
adcq %rax, 120(%rsp)
adcq $0x00, %r10
movq (%rsp), %rax
movq 8(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, (%rdi)
movq 16(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 8(%rdi)
movq 24(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 16(%rdi)
movq 32(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 24(%rdi)
movq 40(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 32(%rdi)
movq 48(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 40(%rdi)
movq 56(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 48(%rdi)
movq 64(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 56(%rdi)
movq 72(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 64(%rdi)
movq 80(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 72(%rdi)
movq 88(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 80(%rdi)
movq 96(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 88(%rdi)
movq 104(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 96(%rdi)
movq 112(%rsp), %rax
shrdq $0x01, %rax, %rcx
movq %rcx, 104(%rdi)
movq 120(%rsp), %rcx
shrdq $0x01, %rcx, %rax
movq %rax, 112(%rdi)
shrdq $0x01, %r10, %rcx
movq %rcx, 120(%rdi)
addq $0x80, %rsp
repz retq
#ifndef __APPLE__
.size sp_1024_mont_div2_16,.-sp_1024_mont_div2_16
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX2
/* Reduce the number back to 1024 bits using Montgomery reduction.
*
* a A single precision number to reduce in place.
* m The single precision number representing the modulus.
* mp The digit representing the negative inverse of m mod 2^n.
*/
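/* Note: this variant requires BMI2/ADX.  mulx produces a 128-bit product
 * without touching the flags, while adcx updates only CF and adox updates
 * only OF, so each iteration can interleave two independent carry chains
 * without saving and restoring the flags register.
 */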
#ifndef __APPLE__
.text
.globl sp_1024_mont_reduce_avx2_16
.type sp_1024_mont_reduce_avx2_16,@function
.align 16
sp_1024_mont_reduce_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_reduce_avx2_16
.p2align 4
_sp_1024_mont_reduce_avx2_16:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq %rdx, %r8
xorq %rbp, %rbp
# i = 16
movq $16, %r9
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
movq 24(%rdi), %r15
addq $0x40, %rdi
xorq %rbp, %rbp
L_1024_mont_reduce_avx2_16_loop:
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -32(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -24(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -24(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq -8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -16(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq (%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -8(%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 8(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, (%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq 16(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 8(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq 24(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 16(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq 32(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 24(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq 40(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 32(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq 48(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 40(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq 56(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 48(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq 64(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 56(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 64(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# mu = a[i] * mp
movq %r12, %rdx
movq %r12, %r10
imulq %r8, %rdx
xorq %rbx, %rbx
# a[i+0] += m[0] * mu
mulxq (%rsi), %rax, %rcx
movq %r13, %r12
adcxq %rax, %r10
adoxq %rcx, %r12
# a[i+1] += m[1] * mu
mulxq 8(%rsi), %rax, %rcx
movq %r14, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
# a[i+2] += m[2] * mu
mulxq 16(%rsi), %rax, %rcx
movq %r15, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
# a[i+3] += m[3] * mu
mulxq 24(%rsi), %rax, %rcx
movq -24(%rdi), %r15
adcxq %rax, %r14
adoxq %rcx, %r15
# a[i+4] += m[4] * mu
mulxq 32(%rsi), %rax, %rcx
movq -16(%rdi), %r11
adcxq %rax, %r15
adoxq %rcx, %r11
# a[i+5] += m[5] * mu
mulxq 40(%rsi), %rax, %rcx
movq -8(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, -16(%rdi)
# a[i+6] += m[6] * mu
mulxq 48(%rsi), %rax, %rcx
movq (%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, -8(%rdi)
# a[i+7] += m[7] * mu
mulxq 56(%rsi), %rax, %rcx
movq 8(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, (%rdi)
# a[i+8] += m[8] * mu
mulxq 64(%rsi), %rax, %rcx
movq 16(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 8(%rdi)
# a[i+9] += m[9] * mu
mulxq 72(%rsi), %rax, %rcx
movq 24(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 16(%rdi)
# a[i+10] += m[10] * mu
mulxq 80(%rsi), %rax, %rcx
movq 32(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 24(%rdi)
# a[i+11] += m[11] * mu
mulxq 88(%rsi), %rax, %rcx
movq 40(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 32(%rdi)
# a[i+12] += m[12] * mu
mulxq 96(%rsi), %rax, %rcx
movq 48(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 40(%rdi)
# a[i+13] += m[13] * mu
mulxq 104(%rsi), %rax, %rcx
movq 56(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 48(%rdi)
# a[i+14] += m[14] * mu
mulxq 112(%rsi), %rax, %rcx
movq 64(%rdi), %r11
adcxq %rax, %r10
adoxq %rcx, %r11
movq %r10, 56(%rdi)
# a[i+15] += m[15] * mu
mulxq 120(%rsi), %rax, %rcx
movq 72(%rdi), %r10
adcxq %rax, %r11
adoxq %rcx, %r10
movq %r11, 64(%rdi)
adcxq %rbp, %r10
movq %rbx, %rbp
movq %r10, 72(%rdi)
adoxq %rbx, %rbp
adcxq %rbx, %rbp
# a += 2
addq $16, %rdi
# i -= 2
subq $2, %r9
jnz L_1024_mont_reduce_avx2_16_loop
subq $0x40, %rdi
subq 120(%rsi), %r10
movq %rdi, %r8
sbbq %r10, %r10
negq %rbp
notq %r10
orq %r10, %rbp
subq $0x80, %rdi
movq (%rsi), %rcx
movq %r12, %rdx
pextq %rbp, %rcx, %rcx
subq %rcx, %rdx
movq 8(%rsi), %rcx
movq %r13, %rax
pextq %rbp, %rcx, %rcx
movq %rdx, (%rdi)
sbbq %rcx, %rax
movq 16(%rsi), %rdx
movq %r14, %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 8(%rdi)
sbbq %rdx, %rcx
movq 24(%rsi), %rax
movq %r15, %rdx
pextq %rbp, %rax, %rax
movq %rcx, 16(%rdi)
sbbq %rax, %rdx
movq 32(%rsi), %rcx
movq 32(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 24(%rdi)
sbbq %rcx, %rax
movq 40(%rsi), %rdx
movq 40(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 32(%rdi)
sbbq %rdx, %rcx
movq 48(%rsi), %rax
movq 48(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 40(%rdi)
sbbq %rax, %rdx
movq 56(%rsi), %rcx
movq 56(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 48(%rdi)
sbbq %rcx, %rax
movq 64(%rsi), %rdx
movq 64(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 56(%rdi)
sbbq %rdx, %rcx
movq 72(%rsi), %rax
movq 72(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 64(%rdi)
sbbq %rax, %rdx
movq 80(%rsi), %rcx
movq 80(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 72(%rdi)
sbbq %rcx, %rax
movq 88(%rsi), %rdx
movq 88(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 80(%rdi)
sbbq %rdx, %rcx
movq 96(%rsi), %rax
movq 96(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 88(%rdi)
sbbq %rax, %rdx
movq 104(%rsi), %rcx
movq 104(%r8), %rax
pextq %rbp, %rcx, %rcx
movq %rdx, 96(%rdi)
sbbq %rcx, %rax
movq 112(%rsi), %rdx
movq 112(%r8), %rcx
pextq %rbp, %rdx, %rdx
movq %rax, 104(%rdi)
sbbq %rdx, %rcx
movq 120(%rsi), %rax
movq 120(%r8), %rdx
pextq %rbp, %rax, %rax
movq %rcx, 112(%rdi)
sbbq %rax, %rdx
movq %rdx, 120(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size sp_1024_mont_reduce_avx2_16,.-sp_1024_mont_reduce_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Add two Montgomery form numbers (r = (a + b) % m).
*
* r Result of addition.
* a First number to add in Montgomery form.
* b Second number to add in Montgomery form.
* m Modulus (prime).
*/
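/* Note: the conditional reduction below uses pext with the mask as the
 * selector: pextq mask, x, dst yields x when the mask is all ones and 0
 * when it is 0, giving a branch-free "modulus or zero" operand for the
 * subtract.  The other avx2 variants below use the same trick.
 */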
#ifndef __APPLE__
.text
.globl sp_1024_mont_add_avx2_16
.type sp_1024_mont_add_avx2_16,@function
.align 16
sp_1024_mont_add_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_add_avx2_16
.p2align 4
_sp_1024_mont_add_avx2_16:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
addq (%rdx), %rax
movq $0x00, %r11
adcq 8(%rdx), %r8
adcq 16(%rdx), %r9
adcq 24(%rdx), %r10
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %r8
movq 48(%rsi), %r9
movq 56(%rsi), %r10
adcq 32(%rdx), %rax
adcq 40(%rdx), %r8
adcq 48(%rdx), %r9
adcq 56(%rdx), %r10
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq %r10, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %r8
movq 80(%rsi), %r9
movq 88(%rsi), %r10
adcq 64(%rdx), %rax
adcq 72(%rdx), %r8
adcq 80(%rdx), %r9
adcq 88(%rdx), %r10
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %r8
movq 112(%rsi), %r9
movq 120(%rsi), %r10
adcq 96(%rdx), %rax
adcq 104(%rdx), %r8
adcq 112(%rdx), %r9
adcq 120(%rdx), %r10
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq %r9, 112(%rdi)
movq %r10, 120(%rdi)
sbbq $0x00, %r11
subq 120(%rcx), %r10
sbbq %r10, %r10
notq %r10
orq %r10, %r11
movq (%rcx), %r9
movq 8(%rcx), %r10
movq (%rdi), %rax
movq 8(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
subq %r9, %rax
sbbq %r10, %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 16(%rcx), %r9
movq 24(%rcx), %r10
movq 16(%rdi), %rax
movq 24(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rcx), %r9
movq 40(%rcx), %r10
movq 32(%rdi), %rax
movq 40(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 48(%rcx), %r9
movq 56(%rcx), %r10
movq 48(%rdi), %rax
movq 56(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
movq 64(%rcx), %r9
movq 72(%rcx), %r10
movq 64(%rdi), %rax
movq 72(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rcx), %r9
movq 88(%rcx), %r10
movq 80(%rdi), %rax
movq 88(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 80(%rdi)
movq %r8, 88(%rdi)
movq 96(%rcx), %r9
movq 104(%rcx), %r10
movq 96(%rdi), %rax
movq 104(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq 112(%rcx), %r9
movq 120(%rcx), %r10
movq 112(%rdi), %rax
movq 120(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
sbbq %r9, %rax
sbbq %r10, %r8
movq %rax, 112(%rdi)
movq %r8, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mont_add_avx2_16,.-sp_1024_mont_add_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Double a Montgomery form number (r = (a + a) % m).
 *
 * r Result of the doubling.
 * a Number to double in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_dbl_avx2_16
.type sp_1024_mont_dbl_avx2_16,@function
.align 16
sp_1024_mont_dbl_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_dbl_avx2_16
.p2align 4
_sp_1024_mont_dbl_avx2_16:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %rcx
movq 80(%rsi), %r8
movq 88(%rsi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %rcx
movq 112(%rsi), %r8
movq 120(%rsi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
movq (%rdi), %rax
movq 8(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
subq %r8, %rax
sbbq %r9, %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mont_dbl_avx2_16,.-sp_1024_mont_dbl_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Triple a Montgomery form number (r = (a + a + a) % m).
 *
 * r Result of the tripling.
 * a Number to triple in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_tpl_avx2_16
.type sp_1024_mont_tpl_avx2_16,@function
.align 16
sp_1024_mont_tpl_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_tpl_avx2_16
.p2align 4
_sp_1024_mont_tpl_avx2_16:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %rcx
movq 80(%rsi), %r8
movq 88(%rsi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %rcx
movq 112(%rsi), %r8
movq 120(%rsi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
movq (%rdi), %rax
movq 8(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
subq %r8, %rax
sbbq %r9, %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
movq (%rdi), %rax
movq 8(%rdi), %rcx
movq 16(%rdi), %r8
movq 24(%rdi), %r9
addq (%rsi), %rax
movq $0x00, %r10
adcq 8(%rsi), %rcx
adcq 16(%rsi), %r8
adcq 24(%rsi), %r9
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
movq 48(%rdi), %r8
movq 56(%rdi), %r9
adcq 32(%rsi), %rax
adcq 40(%rsi), %rcx
adcq 48(%rsi), %r8
adcq 56(%rsi), %r9
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
movq 80(%rdi), %r8
movq 88(%rdi), %r9
adcq 64(%rsi), %rax
adcq 72(%rsi), %rcx
adcq 80(%rsi), %r8
adcq 88(%rsi), %r9
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
movq 112(%rdi), %r8
movq 120(%rdi), %r9
adcq 96(%rsi), %rax
adcq 104(%rsi), %rcx
adcq 112(%rsi), %r8
adcq 120(%rsi), %r9
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
sbbq $0x00, %r10
subq 120(%rdx), %r9
sbbq %r9, %r9
notq %r9
orq %r9, %r10
movq (%rdx), %r8
movq 8(%rdx), %r9
movq (%rdi), %rax
movq 8(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
subq %r8, %rax
sbbq %r9, %rcx
movq %rax, (%rdi)
movq %rcx, 8(%rdi)
movq 16(%rdx), %r8
movq 24(%rdx), %r9
movq 16(%rdi), %rax
movq 24(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 16(%rdi)
movq %rcx, 24(%rdi)
movq 32(%rdx), %r8
movq 40(%rdx), %r9
movq 32(%rdi), %rax
movq 40(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 32(%rdi)
movq %rcx, 40(%rdi)
movq 48(%rdx), %r8
movq 56(%rdx), %r9
movq 48(%rdi), %rax
movq 56(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 48(%rdi)
movq %rcx, 56(%rdi)
movq 64(%rdx), %r8
movq 72(%rdx), %r9
movq 64(%rdi), %rax
movq 72(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 64(%rdi)
movq %rcx, 72(%rdi)
movq 80(%rdx), %r8
movq 88(%rdx), %r9
movq 80(%rdi), %rax
movq 88(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 80(%rdi)
movq %rcx, 88(%rdi)
movq 96(%rdx), %r8
movq 104(%rdx), %r9
movq 96(%rdi), %rax
movq 104(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 96(%rdi)
movq %rcx, 104(%rdi)
movq 112(%rdx), %r8
movq 120(%rdx), %r9
movq 112(%rdi), %rax
movq 120(%rdi), %rcx
pextq %r10, %r8, %r8
pextq %r10, %r9, %r9
sbbq %r8, %rax
sbbq %r9, %rcx
movq %rax, 112(%rdi)
movq %rcx, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mont_tpl_avx2_16,.-sp_1024_mont_tpl_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Subtract two Montgomery form numbers (r = (a - b) % m).
 *
 * r Result of the subtraction.
 * a Number to subtract from, in Montgomery form.
 * b Number to subtract, in Montgomery form.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_sub_avx2_16
.type sp_1024_mont_sub_avx2_16,@function
.align 16
sp_1024_mont_sub_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_sub_avx2_16
.p2align 4
_sp_1024_mont_sub_avx2_16:
#endif /* __APPLE__ */
movq (%rsi), %rax
movq 8(%rsi), %r8
movq 16(%rsi), %r9
movq 24(%rsi), %r10
subq (%rdx), %rax
movq $0x00, %r11
sbbq 8(%rdx), %r8
sbbq 16(%rdx), %r9
sbbq 24(%rdx), %r10
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq %r9, 16(%rdi)
movq %r10, 24(%rdi)
movq 32(%rsi), %rax
movq 40(%rsi), %r8
movq 48(%rsi), %r9
movq 56(%rsi), %r10
sbbq 32(%rdx), %rax
sbbq 40(%rdx), %r8
sbbq 48(%rdx), %r9
sbbq 56(%rdx), %r10
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq %r10, 56(%rdi)
movq 64(%rsi), %rax
movq 72(%rsi), %r8
movq 80(%rsi), %r9
movq 88(%rsi), %r10
sbbq 64(%rdx), %rax
sbbq 72(%rdx), %r8
sbbq 80(%rdx), %r9
sbbq 88(%rdx), %r10
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq %r9, 80(%rdi)
movq %r10, 88(%rdi)
movq 96(%rsi), %rax
movq 104(%rsi), %r8
movq 112(%rsi), %r9
movq 120(%rsi), %r10
sbbq 96(%rdx), %rax
sbbq 104(%rdx), %r8
sbbq 112(%rdx), %r9
sbbq 120(%rdx), %r10
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq %r9, 112(%rdi)
movq %r10, 120(%rdi)
sbbq $0x00, %r11
movq (%rcx), %r9
movq 8(%rcx), %r10
movq (%rdi), %rax
movq 8(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
addq %r9, %rax
adcq %r10, %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 16(%rcx), %r9
movq 24(%rcx), %r10
movq 16(%rdi), %rax
movq 24(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 32(%rcx), %r9
movq 40(%rcx), %r10
movq 32(%rdi), %rax
movq 40(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 48(%rcx), %r9
movq 56(%rcx), %r10
movq 48(%rdi), %rax
movq 56(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
movq 64(%rcx), %r9
movq 72(%rcx), %r10
movq 64(%rdi), %rax
movq 72(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 64(%rdi)
movq %r8, 72(%rdi)
movq 80(%rcx), %r9
movq 88(%rcx), %r10
movq 80(%rdi), %rax
movq 88(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 80(%rdi)
movq %r8, 88(%rdi)
movq 96(%rcx), %r9
movq 104(%rcx), %r10
movq 96(%rdi), %rax
movq 104(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 96(%rdi)
movq %r8, 104(%rdi)
movq 112(%rcx), %r9
movq 120(%rcx), %r10
movq 112(%rdi), %rax
movq 120(%rdi), %r8
pextq %r11, %r9, %r9
pextq %r11, %r10, %r10
adcq %r9, %rax
adcq %r10, %r8
movq %rax, 112(%rdi)
movq %r8, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mont_sub_avx2_16,.-sp_1024_mont_sub_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#ifdef HAVE_INTEL_AVX2
/* Divide the number by 2 mod the modulus (prime). (r = a / 2 % m)
*
* r Result of division by 2.
* a Number to divide.
* m Modulus (prime).
*/
#ifndef __APPLE__
.text
.globl sp_1024_mont_div2_avx2_16
.type sp_1024_mont_div2_avx2_16,@function
.align 16
sp_1024_mont_div2_avx2_16:
#else
.section __TEXT,__text
.globl _sp_1024_mont_div2_avx2_16
.p2align 4
_sp_1024_mont_div2_avx2_16:
#endif /* __APPLE__ */
movq (%rsi), %r11
xorq %r10, %r10
movq %r11, %r8
andq $0x01, %r11
negq %r11
movq (%rdx), %rax
movq 8(%rdx), %rcx
movq (%rsi), %r8
movq 8(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
addq %rax, %r8
adcq %rcx, %r9
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq 16(%rdx), %rax
movq 24(%rdx), %rcx
movq 16(%rsi), %r8
movq 24(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 16(%rdi)
movq %r9, 24(%rdi)
movq 32(%rdx), %rax
movq 40(%rdx), %rcx
movq 32(%rsi), %r8
movq 40(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
movq 48(%rdx), %rax
movq 56(%rdx), %rcx
movq 48(%rsi), %r8
movq 56(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq 64(%rdx), %rax
movq 72(%rdx), %rcx
movq 64(%rsi), %r8
movq 72(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq 80(%rdx), %rax
movq 88(%rdx), %rcx
movq 80(%rsi), %r8
movq 88(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 80(%rdi)
movq %r9, 88(%rdi)
movq 96(%rdx), %rax
movq 104(%rdx), %rcx
movq 96(%rsi), %r8
movq 104(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 96(%rdi)
movq %r9, 104(%rdi)
movq 112(%rdx), %rax
movq 120(%rdx), %rcx
movq 112(%rsi), %r8
movq 120(%rsi), %r9
pextq %r11, %rax, %rax
pextq %r11, %rcx, %rcx
adcq %rax, %r8
adcq %rcx, %r9
movq %r8, 112(%rdi)
movq %r9, 120(%rdi)
adcq $0x00, %r10
movq (%rdi), %r8
movq 8(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, (%rdi)
movq 16(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 8(%rdi)
movq 24(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 16(%rdi)
movq 32(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 24(%rdi)
movq 40(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 32(%rdi)
movq 48(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 40(%rdi)
movq 56(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 48(%rdi)
movq 64(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 56(%rdi)
movq 72(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 64(%rdi)
movq 80(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 72(%rdi)
movq 88(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 80(%rdi)
movq 96(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 88(%rdi)
movq 104(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 96(%rdi)
movq 112(%rdi), %r8
shrdq $0x01, %r8, %r9
movq %r9, 104(%rdi)
movq 120(%rdi), %r9
shrdq $0x01, %r9, %r8
movq %r8, 112(%rdi)
shrdq $0x01, %r10, %r9
movq %r9, 120(%rdi)
repz retq
#ifndef __APPLE__
.size sp_1024_mont_div2_avx2_16,.-sp_1024_mont_div2_avx2_16
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
/* Read big endian unsigned byte array into r.
* Uses the bswap instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
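/* Reference sketch (illustrative only): the array is consumed from its
 * big end in 64-byte then 8-byte chunks, byte-swapping each 64-bit load;
 * any 1-7 leading bytes left over are accumulated one byte at a time, and
 * the rest of the 128-byte result is zero-filled.  bswap64/load_le64 are
 * hypothetical helper names:
 *
 *     while (n >= 8) { n -= 8; *r++ = bswap64(load_le64(a + n)); }
 *     if (n) {                         // leftover most significant bytes
 *         uint64_t w = 0;
 *         while (n--) w = (w << 8) | *a++;
 *         *r++ = w;
 *     }
 *     while (r != r_end) *r++ = 0;     // zero the remaining words
 */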
#ifndef __APPLE__
.text
.globl sp_1024_from_bin_bswap
.type sp_1024_from_bin_bswap,@function
.align 16
sp_1024_from_bin_bswap:
#else
.section __TEXT,__text
.globl _sp_1024_from_bin_bswap
.p2align 4
_sp_1024_from_bin_bswap:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x80, %r10
xorq %r11, %r11
jmp L_1024_from_bin_bswap_64_end
L_1024_from_bin_bswap_64_start:
subq $0x40, %r9
movq 56(%r9), %rax
movq 48(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movq 40(%r9), %rax
movq 32(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movq 24(%r9), %rax
movq 16(%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movq 8(%r9), %rax
movq (%r9), %r8
bswapq %rax
bswapq %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_1024_from_bin_bswap_64_end:
cmpq $63, %rcx
jg L_1024_from_bin_bswap_64_start
jmp L_1024_from_bin_bswap_8_end
L_1024_from_bin_bswap_8_start:
subq $8, %r9
movq (%r9), %rax
bswapq %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_1024_from_bin_bswap_8_end:
cmpq $7, %rcx
jg L_1024_from_bin_bswap_8_start
cmpq %r11, %rcx
je L_1024_from_bin_bswap_hi_end
movq %r11, %r8
movq %r11, %rax
L_1024_from_bin_bswap_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_1024_from_bin_bswap_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_1024_from_bin_bswap_hi_end:
cmpq %r10, %rdi
jge L_1024_from_bin_bswap_zero_end
L_1024_from_bin_bswap_zero_start:
movq %r11, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_1024_from_bin_bswap_zero_start
L_1024_from_bin_bswap_zero_end:
repz retq
#ifndef __APPLE__
.size sp_1024_from_bin_bswap,.-sp_1024_from_bin_bswap
#endif /* __APPLE__ */
#ifndef NO_MOVBE_SUPPORT
/* Read big endian unsigned byte array into r.
* Uses the movbe instruction which is an optional instruction.
*
* r A single precision integer.
 * size Maximum number of bytes to convert.
* a Byte array.
* n Number of bytes in array to read.
*/
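/* Note: movbeq performs the byte swap as part of the load, so this
 * routine is the bswap variant above with each two-instruction
 * load-and-swap pair collapsed into a single movbeq.
 */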
#ifndef __APPLE__
.text
.globl sp_1024_from_bin_movbe
.type sp_1024_from_bin_movbe,@function
.align 16
sp_1024_from_bin_movbe:
#else
.section __TEXT,__text
.globl _sp_1024_from_bin_movbe
.p2align 4
_sp_1024_from_bin_movbe:
#endif /* __APPLE__ */
movq %rdx, %r9
movq %rdi, %r10
addq %rcx, %r9
addq $0x80, %r10
jmp L_1024_from_bin_movbe_64_end
L_1024_from_bin_movbe_64_start:
subq $0x40, %r9
movbeq 56(%r9), %rax
movbeq 48(%r9), %r8
movq %rax, (%rdi)
movq %r8, 8(%rdi)
movbeq 40(%r9), %rax
movbeq 32(%r9), %r8
movq %rax, 16(%rdi)
movq %r8, 24(%rdi)
movbeq 24(%r9), %rax
movbeq 16(%r9), %r8
movq %rax, 32(%rdi)
movq %r8, 40(%rdi)
movbeq 8(%r9), %rax
movbeq (%r9), %r8
movq %rax, 48(%rdi)
movq %r8, 56(%rdi)
addq $0x40, %rdi
subq $0x40, %rcx
L_1024_from_bin_movbe_64_end:
cmpq $63, %rcx
jg L_1024_from_bin_movbe_64_start
jmp L_1024_from_bin_movbe_8_end
L_1024_from_bin_movbe_8_start:
subq $8, %r9
movbeq (%r9), %rax
movq %rax, (%rdi)
addq $8, %rdi
subq $8, %rcx
L_1024_from_bin_movbe_8_end:
cmpq $7, %rcx
jg L_1024_from_bin_movbe_8_start
cmpq $0x00, %rcx
je L_1024_from_bin_movbe_hi_end
movq $0x00, %r8
movq $0x00, %rax
L_1024_from_bin_movbe_hi_start:
movb (%rdx), %al
shlq $8, %r8
incq %rdx
addq %rax, %r8
decq %rcx
jg L_1024_from_bin_movbe_hi_start
movq %r8, (%rdi)
addq $8, %rdi
L_1024_from_bin_movbe_hi_end:
cmpq %r10, %rdi
jge L_1024_from_bin_movbe_zero_end
L_1024_from_bin_movbe_zero_start:
movq $0x00, (%rdi)
addq $8, %rdi
cmpq %r10, %rdi
jl L_1024_from_bin_movbe_zero_start
L_1024_from_bin_movbe_zero_end:
repz retq
#ifndef __APPLE__
.size sp_1024_from_bin_movbe,.-sp_1024_from_bin_movbe
#endif /* __APPLE__ */
#endif /* !NO_MOVBE_SUPPORT */
#endif /* WOLFSSL_SP_1024 */
#endif /* WOLFSSL_SP_X86_64_ASM */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
| aegean-odyssey/mpmd_marlin_1.1.x | 11,394 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComDMA/MDK-ARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;*                      After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
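; SystemInit is the CMSIS system/clock hook and __main is the ARM C
; library entry that initialises data and then calls main(). A minimal
; C-side counterpart (illustrative sketch only) would be:
;   void SystemInit(void) { /* configure RCC, flash wait states, ... */ }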
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
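; Because every stub here is exported [WEAK], an application overrides
; one simply by defining a strong symbol of the same name, e.g. in C
; (illustrative):
;   void SysTick_Handler(void) { HAL_IncTick(); }
; The linker then binds the vector table entry to the C definition.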
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
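; Note: __user_initial_stackheap must return R0 = heap base, R1 = stack
; base, R2 = heap limit and R3 = stack limit; the loads above satisfy
; the ARM C library's two-region memory model.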
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aesc-silicon/ElemRV
| 1,855
|
hardware/scala/elemrv/demo/start.s
|
.equ REGBYTES, 0x4
.section .text
.global hang
.global init_trap
.global interrupt_enable
.global interrupt_disable
.extern isr_handle
_head:
li a0, 1
jal gpio_set_pin
jal _init_bss
li sp, 0x90010000
j _kernel
hang:
nop
beqz zero, hang
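# Install _irq_wrapper as the machine trap vector. The label is 4-byte
# aligned, so mtvec's low mode bits are zero (direct mode: all traps
# enter _irq_wrapper).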
init_trap:
la t0, _irq_wrapper
csrw mtvec, t0
ret
_init_bss:
la t0, __bss_start
la t1, __bss_end
loop_head:
beq t0, t1, loop_end   # __bss_end is exclusive; stop before writing past it
sw zero, 0(t0)
addi t0, t0, 4
j loop_head
loop_end:
ret
nop
_irq_wrapper:
add sp, sp, -16*REGBYTES
# save the caller-saved registers inside the 16-word frame (offsets 0..15)
sw a0, 0*REGBYTES(sp)
sw a1, 1*REGBYTES(sp)
sw a2, 2*REGBYTES(sp)
sw a3, 3*REGBYTES(sp)
sw a4, 4*REGBYTES(sp)
sw a5, 5*REGBYTES(sp)
sw a6, 6*REGBYTES(sp)
sw a7, 7*REGBYTES(sp)
sw ra, 8*REGBYTES(sp)
sw t0, 9*REGBYTES(sp)
sw t1, 10*REGBYTES(sp)
sw t2, 11*REGBYTES(sp)
sw t3, 12*REGBYTES(sp)
sw t4, 13*REGBYTES(sp)
sw t5, 14*REGBYTES(sp)
sw t6, 15*REGBYTES(sp)
csrr a0, mcause
jal isr_handle
# restore the saved registers and release the frame
lw a0, 0*REGBYTES(sp)
lw a1, 1*REGBYTES(sp)
lw a2, 2*REGBYTES(sp)
lw a3, 3*REGBYTES(sp)
lw a4, 4*REGBYTES(sp)
lw a5, 5*REGBYTES(sp)
lw a6, 6*REGBYTES(sp)
lw a7, 7*REGBYTES(sp)
lw ra, 8*REGBYTES(sp)
lw t0, 9*REGBYTES(sp)
lw t1, 10*REGBYTES(sp)
lw t2, 11*REGBYTES(sp)
lw t3, 12*REGBYTES(sp)
lw t4, 13*REGBYTES(sp)
lw t5, 14*REGBYTES(sp)
lw t6, 15*REGBYTES(sp)
addi sp, sp, 16*REGBYTES
mret
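# A matching C-side handler (illustrative sketch; the wrapper above only
# guarantees mcause in the first argument):
#   void isr_handle(unsigned long mcause) {
#       if (mcause >> 31)                  /* RV32: MSB set = interrupt */
#           ;  /* dispatch on mcause & 0x7fffffff */
#   }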
timer_enable:
csrr a0, mie
ori a0, a0, 0x80       # mie.MTIE: machine timer interrupt enable
csrw mie, a0
ret
timer_disable:
li a0, 0x80
csrc mie, a0           # clear mie.MTIE (xor would only toggle the bit)
ret
interrupt_enable:
csrr a0, mie
li a1, 0x800           # mie.MEIE: machine external interrupt enable
or a0, a0, a1
csrw mie, a0
csrr a0, mstatus
ori a0, a0, 0x8        # mstatus.MIE: global machine interrupt enable
csrw mstatus, a0
ret
interrupt_disable:
li a0, 0x800
csrc mie, a0           # clear mie.MEIE
li a0, 0x8
csrc mstatus, a0       # clear mstatus.MIE (csrc clears, never toggles)
ret
# Basic driver for debugging
gpio_set_pin:
li t0, 0xf0000000      # GPIO controller base
sw a0, 0x10(t0)        # write a0 into two controller registers
sw a0, 0x14(t0)        # (offsets are specific to this SoC's register map)
ret
|
aesc-silicon/ElemRV
| 1,555
|
hardware/scala/elemrv/bootrom/start.s
|
.equ REGBYTES, 0x4
.section .text
_head:
li a0, 1
jal gpio_set_pin
jal _init_regs
#jal _init_memc
jal _init_bss
li a0, 0
jal gpio_set_pin
jal _relocate
# Jump to application
li ra, 0x90000000
ret
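# Seed every general-purpose register with a recognisable pattern
# (presumably to aid debugging and avoid undefined-value propagation
# in simulation).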
_init_regs:
li x2 , 0xA2A2A2A2
li x3 , 0xA3A3A3A3
li x4 , 0xA4A4A4A4
li x5 , 0xA5A5A5A5
li x6 , 0xA6A6A6A6
li x7 , 0xA7A7A7A7
li x8 , 0xA8A8A8A8
li x9 , 0xA9A9A9A9
li x10, 0xB0B0B0B0
li x11, 0xB1B1B1B1
li x12, 0xB2B2B2B2
li x13, 0xB3B3B3B3
li x14, 0xB4B4B4B4
li x15, 0xB5B5B5B5
li x16, 0xB6B6B6B6
li x17, 0xB7B7B7B7
li x18, 0xB8B8B8B8
li x19, 0xB9B9B9B9
li x20, 0xC0C0C0C0
li x21, 0xC1C1C1C1
li x22, 0xC2C2C2C2
li x23, 0xC3C3C3C3
li x24, 0xC4C4C4C4
li x25, 0xC5C5C5C5
li x26, 0xC6C6C6C6
li x27, 0xC7C7C7C7
li x28, 0xC8C8C8C8
li x29, 0xC9C9C9C9
li x30, 0xD0D0D0D0
li x31, 0xD1D1D1D1
ret
_init_memc:
li t0, 0xf0023000
li t1, 0x20
sw t1, 0x14(t0) # reset pulse
li t1, 0x40
sw t1, 0x18(t0) # reset hold
li t1, 7
sw t1, 0x20(t0) # latency cycles
li t1, 1
sw t1, 0x10(t0) # reset chip
ret
_init_bss:
la t0, __bss_start
la t1, __bss_end
loop_head:
beq t0, t1, loop_end   # __bss_end is exclusive; stop before writing past it
sw zero, 0(t0)
addi t0, t0, 4
j loop_head
loop_end:
ret
nop
_relocate:
li t0, 0xa0010000      # copy source: staged application image
li t1, 0xa0012000      # source end (0x2000 bytes)
li t2, 0x90000000      # copy destination: application run address
beq t0, t1, relocate_loop_end
relocate_loop_head:
lw a0, 0x0(t0)
sw a0, 0x0(t2)
addi t0, t0, 4
addi t2, t2, 4
bne t0, t1, relocate_loop_head
relocate_loop_end:
ret
nop
# Basic driver for debugging
gpio_set_pin:
li t0, 0xf0000000      # GPIO controller base
sw a0, 0x10(t0)        # write a0 into two controller registers
sw a0, 0x14(t0)        # (offsets are specific to this SoC's register map)
ret
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComDMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
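/* Illustrative example (not part of the vector table): a user source
   file overrides a weak alias simply by defining the symbol, e.g.

     void EXTI0_1_IRQHandler(void) { handle_pin(); }

   where handle_pin() is a hypothetical user function. The strong
   definition wins at link time and replaces Default_Handler. */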
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComDMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
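;; A user application overrides any stub below by defining a function
;; of the same name; the PUBWEAK definition then drops out at link
;; time, e.g. (illustrative C): void USART1_IRQHandler(void) { ... }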
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
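;; __iar_program_start is the IAR runtime entry: it performs segment
;; initialisation and then calls main(). SystemInit runs first so the
;; clocks are configured before C initialisation.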
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aerisarn/mesa-uwp
| 71,203
|
src/util/blake3/blake3_sse2_x86-64_windows_gnu.S
|
.intel_syntax noprefix
.global blake3_hash_many_sse2
.global _blake3_hash_many_sse2
.global blake3_compress_in_place_sse2
.global _blake3_compress_in_place_sse2
.global blake3_compress_xof_sse2
.global _blake3_compress_xof_sse2
.section .text
.p2align 6
_blake3_hash_many_sse2:
blake3_hash_many_sse2:
push r15
push r14
push r13
push r12
push rsi
push rdi
push rbx
push rbp
mov rbp, rsp
sub rsp, 528
and rsp, 0xFFFFFFFFFFFFFFC0
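# xmm6-xmm15 are callee-saved under the Microsoft x64 ABI, so spill
# them into the 64-byte-aligned scratch area reserved above.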
movdqa xmmword ptr [rsp+0x170], xmm6
movdqa xmmword ptr [rsp+0x180], xmm7
movdqa xmmword ptr [rsp+0x190], xmm8
movdqa xmmword ptr [rsp+0x1A0], xmm9
movdqa xmmword ptr [rsp+0x1B0], xmm10
movdqa xmmword ptr [rsp+0x1C0], xmm11
movdqa xmmword ptr [rsp+0x1D0], xmm12
movdqa xmmword ptr [rsp+0x1E0], xmm13
movdqa xmmword ptr [rsp+0x1F0], xmm14
movdqa xmmword ptr [rsp+0x200], xmm15
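# Translate the Microsoft x64 argument registers (rcx/rdx/r8/r9) into
# the rdi/rsi/rdx/rcx assignment the body below expects; the remaining
# arguments are fetched from the caller's stack frame via rbp.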
mov rdi, rcx
mov rsi, rdx
mov rdx, r8
mov rcx, r9
mov r8, qword ptr [rbp+0x68]
movzx r9, byte ptr [rbp+0x70]
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x90]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x78]
movzx r12d, byte ptr [rbp+0x88]
cmp rsi, 4
jc 3f
2:
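# 4-way outer loop: broadcast each of the eight chaining-value words
# across all lanes (one lane per message) and load the four input
# pointers.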
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
9:
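# Per-block loop: load one 64-byte block from each of the four inputs
# and transpose 4x4 dword tiles so each xmm register holds the same
# message word from all four messages.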
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
mov eax, r13d
jne 9b
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jne 3f
4:
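/* Common exit: restore the Win64 callee-saved xmm6-xmm15 from the frame,
   then unwind rsp/rbp and the pushed general-purpose registers. */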
movdqa xmm6, xmmword ptr [rsp+0x170]
movdqa xmm7, xmmword ptr [rsp+0x180]
movdqa xmm8, xmmword ptr [rsp+0x190]
movdqa xmm9, xmmword ptr [rsp+0x1A0]
movdqa xmm10, xmmword ptr [rsp+0x1B0]
movdqa xmm11, xmmword ptr [rsp+0x1C0]
movdqa xmm12, xmmword ptr [rsp+0x1D0]
movdqa xmm13, xmmword ptr [rsp+0x1E0]
movdqa xmm14, xmmword ptr [rsp+0x1F0]
movdqa xmm15, xmmword ptr [rsp+0x200]
mov rsp, rbp
pop rbp
pop rbx
pop rdi
pop rsi
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
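/* Tail: fewer than four inputs remain. This path hashes two inputs at once
   by running two independent 4x4 states side by side (xmm0-3 and xmm8-11),
   then falls through to a single-input loop for the last one. */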
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
movd xmm13, dword ptr [rsp+0x124]
punpckldq xmm14, xmm13
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
shl rax, 0x20
or rax, 0x40
movq xmm3, rax
movdqa xmmword ptr [rsp+0x20], xmm3
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
punpcklqdq xmm3, xmmword ptr [rsp+0x20]
punpcklqdq xmm11, xmmword ptr [rsp+0x20]
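/* Seven compression rounds; al is the round counter. After every round but
   the last, the message words are permuted in registers for the next round. */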
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm13, xmm12
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
movdqa xmm13, xmm6
pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm12, xmm13
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm6, xmm5
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
movdqa xmmword ptr [rsp+0x30], xmm2
movdqa xmm2, xmm14
pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm5, xmm2
movdqa xmm2, xmmword ptr [rsp+0x30]
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
mov eax, dword ptr [rsp+0x130]
neg eax
mov r10d, dword ptr [rsp+0x110+8*rax]
mov r11d, dword ptr [rsp+0x120+8*rax]
mov dword ptr [rsp+0x110], r10d
mov dword ptr [rsp+0x120], r11d
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl rax, 32
or rax, 64
movq xmm12, rax
movdqa xmm3, xmm13
punpcklqdq xmm3, xmm12
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
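/* blake3_compress_in_place_sse2(cv, block, block_len, counter, flags).
   Win64 convention: rcx = 8-word chaining value (updated in place), rdx =
   64-byte block, r8b = block_len, r9 = counter; the flags byte is the fifth
   argument, read from the caller's stack at [rsp+0xA0] once the 120-byte
   local frame below is in place. */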
.p2align 6
blake3_compress_in_place_sse2:
_blake3_compress_in_place_sse2:
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+0x10], xmm7
movdqa xmmword ptr [rsp+0x20], xmm8
movdqa xmmword ptr [rsp+0x30], xmm9
movdqa xmmword ptr [rsp+0x40], xmm11
movdqa xmmword ptr [rsp+0x50], xmm14
movdqa xmmword ptr [rsp+0x60], xmm15
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, byte ptr [rsp+0xA0]
movzx r8d, r8b
shl rax, 32
add r8, rax
movq xmm3, r9
movq xmm4, r8
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rdx]
movups xmm5, xmmword ptr [rdx+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rdx+0x20]
movups xmm7, xmmword ptr [rdx+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm14, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm14, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm14
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rcx], xmm0
movups xmmword ptr [rcx+0x10], xmm1
movdqa xmm6, xmmword ptr [rsp]
movdqa xmm7, xmmword ptr [rsp+0x10]
movdqa xmm8, xmmword ptr [rsp+0x20]
movdqa xmm9, xmmword ptr [rsp+0x30]
movdqa xmm11, xmmword ptr [rsp+0x40]
movdqa xmm14, xmmword ptr [rsp+0x50]
movdqa xmm15, xmmword ptr [rsp+0x60]
add rsp, 120
ret
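/* blake3_compress_xof_sse2: same arguments as the in-place variant plus a
   sixth parameter at [rsp+0xA8], the 64-byte output pointer (kept in r10).
   Instead of updating cv it writes the extended output: the first 32 bytes
   are the usual feed-forward v[0..7]^v[8..15], the second 32 bytes are
   v[8..15]^cv. */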
.p2align 6
_blake3_compress_xof_sse2:
blake3_compress_xof_sse2:
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+0x10], xmm7
movdqa xmmword ptr [rsp+0x20], xmm8
movdqa xmmword ptr [rsp+0x30], xmm9
movdqa xmmword ptr [rsp+0x40], xmm11
movdqa xmmword ptr [rsp+0x50], xmm14
movdqa xmmword ptr [rsp+0x60], xmm15
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, byte ptr [rsp+0xA0]
movzx r8d, r8b
mov r10, qword ptr [rsp+0xA8]
shl rax, 32
add r8, rax
movq xmm3, r9
movq xmm4, r8
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rdx]
movups xmm5, xmmword ptr [rdx+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rdx+0x20]
movups xmm7, xmmword ptr [rdx+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm14, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm14, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm14
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rcx]
movdqu xmm5, xmmword ptr [rcx+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r10], xmm0
movups xmmword ptr [r10+0x10], xmm1
movups xmmword ptr [r10+0x20], xmm2
movups xmmword ptr [r10+0x30], xmm3
movdqa xmm6, xmmword ptr [rsp]
movdqa xmm7, xmmword ptr [rsp+0x10]
movdqa xmm8, xmmword ptr [rsp+0x20]
movdqa xmm9, xmmword ptr [rsp+0x30]
movdqa xmm11, xmmword ptr [rsp+0x40]
movdqa xmm14, xmmword ptr [rsp+0x50]
movdqa xmm15, xmmword ptr [rsp+0x60]
add rsp, 120
ret
.section .rodata
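/* Constant tables. BLAKE3_IV holds the first four IV words (shared with
   BLAKE2s / SHA-256). ADD0/ADD1 appear to supply per-lane counter offsets
   and the four-lane counter step. CMP_MSB_MASK flips the sign bit so an
   unsigned carry can be detected with the signed pcmpgtd, and the
   PBLENDW_* masks emulate the SSE4.1 pblendw using pand/por. */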
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
PBLENDW_0x33_MASK:
.long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000
PBLENDW_0xCC_MASK:
.long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF
PBLENDW_0x3F_MASK:
.long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000
PBLENDW_0xC0_MASK:
.long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
aerisarn/mesa-uwp | 91,004 | src/util/blake3/blake3_avx512_x86-64_windows_gnu.S |
.intel_syntax noprefix
.global _blake3_hash_many_avx512
.global blake3_hash_many_avx512
.global blake3_compress_in_place_avx512
.global _blake3_compress_in_place_avx512
.global blake3_compress_xof_avx512
.global _blake3_compress_xof_avx512
.section .text
.p2align 6
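/* blake3_hash_many_avx512: hashes many multi-block inputs in parallel, 16
   per pass, keeping each 32-bit state word for all 16 lanes in one zmm
   register. The Win64 arguments are first shuffled into rdi/rsi/rdx/rcx,
   presumably so the body can match the unix build; xmm6-xmm15 are
   callee-saved under this ABI and are spilled to the aligned frame. */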
_blake3_hash_many_avx512:
blake3_hash_many_avx512:
push r15
push r14
push r13
push r12
push rdi
push rsi
push rbx
push rbp
mov rbp, rsp
sub rsp, 304
and rsp, 0xFFFFFFFFFFFFFFC0
vmovdqa xmmword ptr [rsp+0x90], xmm6
vmovdqa xmmword ptr [rsp+0xA0], xmm7
vmovdqa xmmword ptr [rsp+0xB0], xmm8
vmovdqa xmmword ptr [rsp+0xC0], xmm9
vmovdqa xmmword ptr [rsp+0xD0], xmm10
vmovdqa xmmword ptr [rsp+0xE0], xmm11
vmovdqa xmmword ptr [rsp+0xF0], xmm12
vmovdqa xmmword ptr [rsp+0x100], xmm13
vmovdqa xmmword ptr [rsp+0x110], xmm14
vmovdqa xmmword ptr [rsp+0x120], xmm15
mov rdi, rcx
mov rsi, rdx
mov rdx, r8
mov rcx, r9
mov r8, qword ptr [rbp+0x68]
movzx r9, byte ptr [rbp+0x70]
neg r9
kmovw k1, r9d
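/* Build the 16 per-lane 64-bit counters: broadcast the low and high halves,
   add lane offsets from ADD0, and add 1 to the high half wherever the low
   half wrapped (unsigned-overflow masks k2/k3). k1 is all-ones iff the
   increment_counter argument was nonzero; where it is clear, the plain
   broadcast counter is kept instead. The vectors are parked at
   [rsp..rsp+0x7F] for the main loop. */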
vmovd xmm0, r8d
vpbroadcastd ymm0, xmm0
shr r8, 32
vmovd xmm1, r8d
vpbroadcastd ymm1, xmm1
vmovdqa ymm4, ymm1
vmovdqa ymm5, ymm1
vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip]
vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip]
vpcmpltud k2, ymm2, ymm0
vpcmpltud k3, ymm3, ymm0
vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8}
vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8}
knotw k2, k1
vmovdqa32 ymm2 {k2}, ymm0
vmovdqa32 ymm3 {k2}, ymm0
vmovdqa32 ymm4 {k2}, ymm1
vmovdqa32 ymm5 {k2}, ymm1
vmovdqa ymmword ptr [rsp], ymm2
vmovdqa ymmword ptr [rsp+0x20], ymm3
vmovdqa ymmword ptr [rsp+0x40], ymm4
vmovdqa ymmword ptr [rsp+0x60], ymm5
shl rdx, 6
mov qword ptr [rsp+0x80], rdx
cmp rsi, 16
jc 3f
2:
vpbroadcastd zmm0, dword ptr [rcx]
vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4]
vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4]
vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4]
vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4]
vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4]
vpbroadcastd zmm6, dword ptr [rcx+0x6*0x4]
vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4]
movzx eax, byte ptr [rbp+0x78]
movzx ebx, byte ptr [rbp+0x80]
or eax, ebx
xor edx, edx
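/* Per-block loop over each input: rdx advances 64 bytes at a time. eax
   carries flags | flags_start for the first block only, and flags_end is
   OR'd in (via cmove) exactly when rdx reaches the total byte length
   cached at [rsp+0x80]. */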
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x88]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm8, zmm16, zmm17
vpunpckhqdq zmm9, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm10, zmm18, zmm19
vpunpckhqdq zmm11, zmm18, zmm19
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm12, zmm16, zmm17
vpunpckhqdq zmm13, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm14, zmm18, zmm19
vpunpckhqdq zmm15, zmm18, zmm19
vmovdqa32 zmm27, zmmword ptr [INDEX0+rip]
vmovdqa32 zmm31, zmmword ptr [INDEX1+rip]
vshufps zmm16, zmm8, zmm10, 136
vshufps zmm17, zmm12, zmm14, 136
vmovdqa32 zmm20, zmm16
vpermt2d zmm16, zmm27, zmm17
vpermt2d zmm20, zmm31, zmm17
vshufps zmm17, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm21, zmm17
vpermt2d zmm17, zmm27, zmm30
vpermt2d zmm21, zmm31, zmm30
vshufps zmm18, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm22, zmm18
vpermt2d zmm18, zmm27, zmm8
vpermt2d zmm22, zmm31, zmm8
vshufps zmm19, zmm9, zmm11, 221
vshufps zmm8, zmm13, zmm15, 221
vmovdqa32 zmm23, zmm19
vpermt2d zmm19, zmm27, zmm8
vpermt2d zmm23, zmm31, zmm8
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm8, zmm24, zmm25
vpunpckhqdq zmm9, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm10, zmm24, zmm25
vpunpckhqdq zmm11, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm12, zmm24, zmm25
vpunpckhqdq zmm13, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm14, zmm24, zmm25
vpunpckhqdq zmm15, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
vshufps zmm24, zmm8, zmm10, 136
vshufps zmm30, zmm12, zmm14, 136
vmovdqa32 zmm28, zmm24
vpermt2d zmm24, zmm27, zmm30
vpermt2d zmm28, zmm31, zmm30
vshufps zmm25, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm29, zmm25
vpermt2d zmm25, zmm27, zmm30
vpermt2d zmm29, zmm31, zmm30
vshufps zmm26, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm30, zmm26
vpermt2d zmm26, zmm27, zmm8
vpermt2d zmm30, zmm31, zmm8
vshufps zmm8, zmm9, zmm11, 221
vshufps zmm10, zmm13, zmm15, 221
vpermi2d zmm27, zmm8, zmm10
vpermi2d zmm31, zmm8, zmm10
vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa32 zmm12, zmmword ptr [rsp]
vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40]
vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4]
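/* State rows for the 7 unrolled rounds: zmm0-7 = chaining values, zmm8-11 =
   IV, zmm12/zmm13 = counter low/high, zmm14 = block length (64), zmm15 =
   flags; the transposed message words live in zmm16-zmm31. */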
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm24
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm23
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm27
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm21
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm28
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm26
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm22
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm31
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
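/* Feed-forward: the new chaining values are v[0..7] ^ v[8..15] per lane.
   The jne below reuses ZF from the cmp at the top of the block loop; none
   of the intervening vector or mov instructions touch rflags. */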
vpxord zmm0, zmm0, zmm8
vpxord zmm1, zmm1, zmm9
vpxord zmm2, zmm2, zmm10
vpxord zmm3, zmm3, zmm11
vpxord zmm4, zmm4, zmm12
vpxord zmm5, zmm5, zmm13
vpxord zmm6, zmm6, zmm14
vpxord zmm7, zmm7, zmm15
movzx eax, byte ptr [rbp+0x78]
jne 9b
mov rbx, qword ptr [rbp+0x90]
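/* Transpose the 16 lanes' 8-word results from register-major back to
   output-major order (vpunpck* then vshufi32x4), yielding 16 contiguous
   32-byte hashes to store at the out pointer in rbx. */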
vpunpckldq zmm16, zmm0, zmm1
vpunpckhdq zmm17, zmm0, zmm1
vpunpckldq zmm18, zmm2, zmm3
vpunpckhdq zmm19, zmm2, zmm3
vpunpckldq zmm20, zmm4, zmm5
vpunpckhdq zmm21, zmm4, zmm5
vpunpckldq zmm22, zmm6, zmm7
vpunpckhdq zmm23, zmm6, zmm7
vpunpcklqdq zmm0, zmm16, zmm18
vpunpckhqdq zmm1, zmm16, zmm18
vpunpcklqdq zmm2, zmm17, zmm19
vpunpckhqdq zmm3, zmm17, zmm19
vpunpcklqdq zmm4, zmm20, zmm22
vpunpckhqdq zmm5, zmm20, zmm22
vpunpcklqdq zmm6, zmm21, zmm23
vpunpckhqdq zmm7, zmm21, zmm23
vshufi32x4 zmm16, zmm0, zmm4, 0x88
vshufi32x4 zmm17, zmm1, zmm5, 0x88
vshufi32x4 zmm18, zmm2, zmm6, 0x88
vshufi32x4 zmm19, zmm3, zmm7, 0x88
vshufi32x4 zmm20, zmm0, zmm4, 0xDD
vshufi32x4 zmm21, zmm1, zmm5, 0xDD
vshufi32x4 zmm22, zmm2, zmm6, 0xDD
vshufi32x4 zmm23, zmm3, zmm7, 0xDD
vshufi32x4 zmm0, zmm16, zmm17, 0x88
vshufi32x4 zmm1, zmm18, zmm19, 0x88
vshufi32x4 zmm2, zmm20, zmm21, 0x88
vshufi32x4 zmm3, zmm22, zmm23, 0x88
vshufi32x4 zmm4, zmm16, zmm17, 0xDD
vshufi32x4 zmm5, zmm18, zmm19, 0xDD
vshufi32x4 zmm6, zmm20, zmm21, 0xDD
vshufi32x4 zmm7, zmm22, zmm23, 0xDD
vmovdqu32 zmmword ptr [rbx], zmm0
vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1
vmovdqu32 zmmword ptr [rbx+0x2*0x40], zmm2
vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3
vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4
vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5
vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6
vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7
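/* Advance the stored counters by 16 (only under k1, i.e. when
   increment_counter is set) and ripple the carry into the high words. */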
vmovdqa32 zmm0, zmmword ptr [rsp]
vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40]
vmovdqa32 zmm2, zmm0
vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16}
vpcmpltud k2, zmm2, zmm0
vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16}
vmovdqa32 zmmword ptr [rsp], zmm2
vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1
add rdi, 128
add rbx, 512
mov qword ptr [rbp+0x90], rbx
sub rsi, 16
cmp rsi, 16
jnc 2b
test rsi, rsi
jne 3f
4:
vzeroupper
vmovdqa xmm6, xmmword ptr [rsp+0x90]
vmovdqa xmm7, xmmword ptr [rsp+0xA0]
vmovdqa xmm8, xmmword ptr [rsp+0xB0]
vmovdqa xmm9, xmmword ptr [rsp+0xC0]
vmovdqa xmm10, xmmword ptr [rsp+0xD0]
vmovdqa xmm11, xmmword ptr [rsp+0xE0]
vmovdqa xmm12, xmmword ptr [rsp+0xF0]
vmovdqa xmm13, xmmword ptr [rsp+0x100]
vmovdqa xmm14, xmmword ptr [rsp+0x110]
vmovdqa xmm15, xmmword ptr [rsp+0x120]
mov rsp, rbp
pop rbp
pop rbx
pop rsi
pop rdi
pop r12
pop r13
pop r14
pop r15
ret
.p2align 6
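/* Tail: fewer than 16 inputs remain. Groups of 8 are peeled off here using
   the ymm (256-bit) form of the same kernel; smaller remainders are handled
   by the code that follows. */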
3:
test esi, 0x8
je 3f
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x78]
movzx ebx, byte ptr [rbp+0x80]
or eax, ebx
xor edx, edx
2:
movzx ebx, byte ptr [rbp+0x88]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm16, ymm12, ymm14, 136
vshufps ymm17, ymm12, ymm14, 221
vshufps ymm18, ymm13, ymm15, 136
vshufps ymm19, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm20, ymm12, ymm14, 136
vshufps ymm21, ymm12, ymm14, 221
vshufps ymm22, ymm13, ymm15, 136
vshufps ymm23, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm24, ymm12, ymm14, 136
vshufps ymm25, ymm12, ymm14, 221
vshufps ymm26, ymm13, ymm15, 136
vshufps ymm27, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm28, ymm12, ymm14, 136
vshufps ymm29, ymm12, ymm14, 221
vshufps ymm30, ymm13, ymm15, 136
vshufps ymm31, ymm13, ymm15, 221
vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa ymm12, ymmword ptr [rsp]
vmovdqa ymm13, ymmword ptr [rsp+0x40]
vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd ymm15, dword ptr [rsp+0x88]
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm24
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm23
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm27
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm21
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm28
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm26
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm22
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm31
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
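/* End of the rounds: feed forward by xoring rows c,d (ymm8-ymm15) into
   rows a,b; ymm0-ymm7 then hold the eight transposed output CVs. */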
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
movzx eax, byte ptr [rbp+0x78]
jne 2b
mov rbx, qword ptr [rbp+0x90]
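/* Transpose the chaining values from lane order (one word per input across
   a register) back to one contiguous 32-byte CV per input, then store all
   256 bytes of output. */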
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
vmovdqa ymm0, ymmword ptr [rsp]
vmovdqa ymm2, ymmword ptr [rsp+0x40]
vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20]
vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20]
vmovdqa ymmword ptr [rsp], ymm0
vmovdqa ymmword ptr [rsp+0x40], ymm2
add rbx, 256
mov qword ptr [rbp+0x90], rbx
add rdi, 64
sub rsi, 8
3:
mov rbx, qword ptr [rbp+0x90]
mov r15, qword ptr [rsp+0x80]
movzx r13, byte ptr [rbp+0x78]
movzx r12, byte ptr [rbp+0x88]
test esi, 0x4
je 3f
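/* Tail path for 4 remaining inputs: each 512-bit register holds one
   128-bit lane per input (vbroadcasti32x4 / vinserti32x4). */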
vbroadcasti32x4 zmm0, xmmword ptr [rcx]
vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10]
vmovdqa xmm12, xmmword ptr [rsp]
vmovdqa xmm13, xmmword ptr [rsp+0x40]
vpunpckldq xmm14, xmm12, xmm13
vpunpckhdq xmm15, xmm12, xmm13
vpermq ymm14, ymm14, 0xDC
vpermq ymm15, ymm15, 0xDC
vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip]
vinserti64x4 zmm13, zmm14, ymm15, 0x01
mov eax, 17476
kmovw k2, eax
vpblendmd zmm13 {k2}, zmm13, zmm12
vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov eax, 43690
kmovw k3, eax
mov eax, 34952
kmovw k4, eax
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
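/* Keep the end-of-input flag bits (r12) only on the final block. */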
mov dword ptr [rsp+0x88], eax
vmovdqa32 zmm2, zmm15
vpbroadcastd zmm8, dword ptr [rsp+0x22*0x4]
vpblendmd zmm3 {k4}, zmm13, zmm8
vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x30]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03
vshufps zmm4, zmm8, zmm9, 136
vshufps zmm5, zmm8, zmm9, 221
vmovups zmm8, zmmword ptr [r8+rdx-0x20]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x10]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03
vshufps zmm6, zmm8, zmm9, 136
vshufps zmm7, zmm8, zmm9, 221
vpshufd zmm6, zmm6, 0x93
vpshufd zmm7, zmm7, 0x93
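/* 7 compression rounds (al is the round counter); zmm4-zmm7 hold the
   message words and are permuted between rounds below. */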
mov al, 7
9:
vpaddd zmm0, zmm0, zmm4
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm5
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x93
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x39
vpaddd zmm0, zmm0, zmm6
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm7
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x39
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x93
dec al
jz 9f
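/* BLAKE3 message permutation: rearrange zmm4-zmm7 for the next round. */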
vshufps zmm8, zmm4, zmm5, 214
vpshufd zmm9, zmm4, 0x0F
vpshufd zmm4, zmm8, 0x39
vshufps zmm8, zmm6, zmm7, 250
vpblendmd zmm9 {k3}, zmm9, zmm8
vpunpcklqdq zmm8, zmm7, zmm5
vpblendmd zmm8 {k4}, zmm8, zmm6
vpshufd zmm8, zmm8, 0x78
vpunpckhdq zmm5, zmm5, zmm7
vpunpckldq zmm6, zmm6, zmm5
vpshufd zmm7, zmm6, 0x1E
vmovdqa32 zmm5, zmm9
vmovdqa32 zmm6, zmm8
jmp 9b
9:
vpxord zmm0, zmm0, zmm2
vpxord zmm1, zmm1, zmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02
vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02
vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03
vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x40]
vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10]
vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x40], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test esi, 0x2
je 3f
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x4]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x88]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x40]
vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8]
vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x40], xmm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test esi, 0x1
je 4b
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm14, dword ptr [rsp]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vpinsrd xmm3, xmm14, eax, 3
vmovdqa xmm2, xmm15
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
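/* blake3_compress_in_place_avx512 (Windows x64 calling convention):
   rcx = cv, rdx = 64-byte block, r8b = block_len, r9 = counter, and the
   stack argument read at [rsp+0x70] after the prologue = flags.
   xmm6-xmm9 are callee-saved on Windows, so they are spilled here. */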
_blake3_compress_in_place_avx512:
blake3_compress_in_place_avx512:
sub rsp, 72
vmovdqa xmmword ptr [rsp], xmm6
vmovdqa xmmword ptr [rsp+0x10], xmm7
vmovdqa xmmword ptr [rsp+0x20], xmm8
vmovdqa xmmword ptr [rsp+0x30], xmm9
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
movzx eax, byte ptr [rsp+0x70]
movzx r8d, r8b
shl rax, 32
add r8, rax
vmovq xmm3, r9
vmovq xmm4, r8
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rdx]
vmovups xmm9, xmmword ptr [rdx+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rdx+0x20]
vmovups xmm9, xmmword ptr [rdx+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vmovdqu xmmword ptr [rcx], xmm0
vmovdqu xmmword ptr [rcx+0x10], xmm1
vmovdqa xmm6, xmmword ptr [rsp]
vmovdqa xmm7, xmmword ptr [rsp+0x10]
vmovdqa xmm8, xmmword ptr [rsp+0x20]
vmovdqa xmm9, xmmword ptr [rsp+0x30]
add rsp, 72
ret
.p2align 6
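/* blake3_compress_xof_avx512: same arguments as compress_in_place plus a
   sixth argument, the output pointer (loaded into r10 from [rsp+0x78]);
   writes 64 bytes of XOF output instead of updating cv in place. */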
_blake3_compress_xof_avx512:
blake3_compress_xof_avx512:
sub rsp, 72
vmovdqa xmmword ptr [rsp], xmm6
vmovdqa xmmword ptr [rsp+0x10], xmm7
vmovdqa xmmword ptr [rsp+0x20], xmm8
vmovdqa xmmword ptr [rsp+0x30], xmm9
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
movzx eax, byte ptr [rsp+0x70]
movzx r8d, r8b
mov r10, qword ptr [rsp+0x78]
shl rax, 32
add r8, rax
vmovq xmm3, r9
vmovq xmm4, r8
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rdx]
vmovups xmm9, xmmword ptr [rdx+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rdx+0x20]
vmovups xmm9, xmmword ptr [rdx+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
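/* XOF output: bytes 0-31 are the lower state half xored with the upper
   half; bytes 32-63 are the upper state half xored with the original cv. */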
vpxor xmm2, xmm2, xmmword ptr [rcx]
vpxor xmm3, xmm3, xmmword ptr [rcx+0x10]
vmovdqu xmmword ptr [r10], xmm0
vmovdqu xmmword ptr [r10+0x10], xmm1
vmovdqu xmmword ptr [r10+0x20], xmm2
vmovdqu xmmword ptr [r10+0x30], xmm3
vmovdqa xmm6, xmmword ptr [rsp]
vmovdqa xmm7, xmmword ptr [rsp+0x10]
vmovdqa xmm8, xmmword ptr [rsp+0x20]
vmovdqa xmm9, xmmword ptr [rsp+0x30]
add rsp, 72
ret
.section .rodata
.p2align 6
INDEX0:
.long 0, 1, 2, 3, 16, 17, 18, 19
.long 8, 9, 10, 11, 24, 25, 26, 27
INDEX1:
.long 4, 5, 6, 7, 20, 21, 22, 23
.long 12, 13, 14, 15, 28, 29, 30, 31
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
.long 8, 9, 10, 11, 12, 13, 14, 15
ADD1: .long 1
ADD16: .long 16
BLAKE3_BLOCK_LEN:
.long 64
.p2align 6
BLAKE3_IV:
BLAKE3_IV_0:
.long 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A
aegean-odyssey/mpmd_marlin_1.1.x | 11,394 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComIT/MDK-ARM/startup_stm32f072xb.s
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (Touch Sensing Controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
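; Two-region memory model return values:
;   R0 = heap base, R1 = stack base (top of stack),
;   R2 = heap limit, R3 = stack limit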
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComIT/SW4STM32/startup_stm32f072xb.s
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Calls SystemInit and __libc_init_array, then
* branches to main().
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. The linker script must place
* this section so that it ends up at physical address 0x00000000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
aerisarn/mesa-uwp | 61,385 | src/util/blake3/blake3_sse41_x86-64_unix.S
#include "mesa_blake3_visibility.h"
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
HIDDEN blake3_hash_many_sse41
HIDDEN _blake3_hash_many_sse41
HIDDEN blake3_compress_in_place_sse41
HIDDEN _blake3_compress_in_place_sse41
HIDDEN blake3_compress_xof_sse41
HIDDEN _blake3_compress_xof_sse41
.global blake3_hash_many_sse41
.global _blake3_hash_many_sse41
.global blake3_compress_in_place_sse41
.global _blake3_compress_in_place_sse41
.global blake3_compress_xof_sse41
.global _blake3_compress_xof_sse41
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_sse41:
blake3_hash_many_sse41:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
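/* Build the per-lane 64-bit counters: neg r9d turns the increment flag
   into a mask, lane offsets (ADD0) are added to the low counter words,
   and unsigned carry is detected by sign-biasing with CMP_MSB_MASK and
   comparing with pcmpgtd, so the high words can be fixed up with psubd. */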
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
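/* Main 4-way loop: hash four inputs in parallel, one per 32-bit SSE lane. */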
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
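/* Load a 64-byte block from each of the four inputs and transpose the
   message words so each stored xmm holds the same word index from all
   four lanes. */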
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
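/* G function: 16- and 8-bit rotates use pshufb with the ROT16/ROT8
   tables; 12- and 7-bit rotates use psrld/pslld/por, since SSE4.1 has
   no dword rotate instruction. */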
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
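// Final feed-forward, interleaved with the last rotr-7: the low state
// rows (xmm0-xmm7) are XORed with the high rows (xmm8-xmm15) to form
// the output chaining values.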
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
mov eax, r13d
jne 9b
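// All blocks of these four inputs are done. Transpose the word-sliced
// state (one word per input in each register) back into four contiguous
// 32-byte chaining values and store them.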
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
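// Bump the four per-input block counters: add the per-lane increment
// saved in the prologue to the low dwords, detect unsigned carry with
// the CMP_MSB_MASK + signed pcmpgtd trick, and propagate it into the
// high dwords.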
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
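// Advance: four 32-byte outputs, four 8-byte input pointers; loop while
// at least four inputs remain, then fall through to the tail handlers.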
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
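// Tail: compress two remaining inputs at once, with the two compact
// states interleaved in xmm0-xmm3 and xmm8-xmm11.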
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
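// Build [counter_lo, counter_hi, BLAKE3_BLOCK_LEN] for each of the two
// inputs from lanes 0 and 1 of the counter vectors; the per-block flags
// word is inserted into lane 3 inside the block loop below.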
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
pinsrd xmm14, dword ptr [rsp+0x124], 1
pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
pinsrd xmm3, eax, 3
pinsrd xmm11, eax, 3
mov al, 7
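// Seven rounds, counted down in al; both states step through the round
// in lockstep.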
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm12, xmmword ptr [ROT16+rip]
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm13, xmmword ptr [ROT8+rip]
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
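// Diagonalize both states before the second half of the round: rows a,
// d and c are rotated (0x93 / 0x4E / 0x39) while row b stays in place,
// so the next two G passes operate on the diagonals.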
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
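// Between rounds, apply the BLAKE3 message permutation
// (2,6,3,10,7,0,4,13,1,11,12,5,9,14,15,8) to both states' message
// vectors using shufps/pblendw/pshufd.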
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pblendw xmm13, xmm12, 0xCC
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
pblendw xmm12, xmm6, 0xC0
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pblendw xmm6, xmm5, 0xCC
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
pblendw xmm5, xmm14, 0xC0
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
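// Feed-forward for both compact states (rows 0-7 ^= rows 8-15), then
// loop back while blocks remain.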
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
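// Two inputs consumed: the unaligned loads at +0x118/+0x128 apparently
// shift the counter lanes down by two positions, selected under the
// increment mask saved in the prologue, before the pointers advance.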
movdqa xmm0, xmmword ptr [rsp+0x130]
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm2, xmmword ptr [rsp+0x120]
movdqu xmm3, xmmword ptr [rsp+0x118]
movdqu xmm4, xmmword ptr [rsp+0x128]
blendvps xmm1, xmm3, xmm0
blendvps xmm2, xmm4, xmm0
movdqa xmmword ptr [rsp+0x110], xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
add rdi, 16
add rbx, 64
sub rsi, 2
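// Tail: one final input, compressed with a single compact state.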
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm3, xmm13
pinsrd xmm3, eax, 3
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
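// blake3_compress_in_place_sse41(cv, block, block_len, counter, flags):
// compress one 64-byte block and update the 8-word chaining value at
// rdi in place. (flags << 32) | block_len is packed alongside the
// 64-bit counter to form the fourth state row.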
blake3_compress_in_place_sse41:
_blake3_compress_in_place_sse41:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
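// blake3_compress_xof_sse41(cv, block, block_len, counter, flags, out):
// same compression, but emits the full 64-byte extended output at r9;
// rows 0-1 get the usual feed-forward and rows 2-3 are additionally
// XORed with the input chaining value (see the tail after the rounds).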
blake3_compress_xof_sse41:
_blake3_compress_xof_sse41:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
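// BLAKE3_IV: the first four words of the BLAKE3 IV, loaded into the
// third state row (v[8..11]).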
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
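// ROT16 / ROT8: pshufb masks that rotate each 32-bit lane right by 16
// and 8 bits, respectively.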
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
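// ADD0/ADD1: per-lane counter offsets and the four-lane stride;
// BLAKE3_IV_0..3: the IV words splatted across lanes for the wide path;
// BLAKE3_BLOCK_LEN: 64 in every lane; CMP_MSB_MASK: sign-bit mask for
// the unsigned-compare-via-pcmpgtd carry trick.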
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
// ===== src/util/blake3/blake3_avx2_x86-64_unix.S (repo: aerisarn/mesa-uwp) =====
#include "mesa_blake3_visibility.h"
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
HIDDEN _blake3_hash_many_avx2
HIDDEN blake3_hash_many_avx2
.global _blake3_hash_many_avx2
.global blake3_hash_many_avx2
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
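// blake3_hash_many_avx2: hashes many chunks, eight inputs per main-loop
// iteration. Register arguments follow the SysV ABI (inputs, num_inputs,
// blocks, key, counter, increment_counter); the flag bytes are read
// from the caller's stack through rbp (+0x38/+0x40/+0x48).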
_blake3_hash_many_avx2:
blake3_hash_many_avx2:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 680
and rsp, 0xFFFFFFFFFFFFFFC0
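// Build the eight per-lane block counters: neg r9d turns
// increment_counter into an all-ones/zero mask, which is ANDed with the
// lane offsets (ADD0/ADD1); the starting counter is added, and the high
// dwords are derived with the CMP_MSB_MASK signed-compare carry trick.
// blocks is converted to a byte length (shl rdx, 6).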
neg r9d
vmovd xmm0, r9d
vpbroadcastd ymm0, xmm0
vmovdqa ymmword ptr [rsp+0x280], ymm0
vpand ymm1, ymm0, ymmword ptr [ADD0+rip]
vpand ymm2, ymm0, ymmword ptr [ADD1+rip]
vmovdqa ymmword ptr [rsp+0x220], ymm2
vmovd xmm2, r8d
vpbroadcastd ymm2, xmm2
vpaddd ymm2, ymm2, ymm1
vmovdqa ymmword ptr [rsp+0x240], ymm2
vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm1, ymm2
shr r8, 32
vmovd xmm3, r8d
vpbroadcastd ymm3, xmm3
vpsubd ymm3, ymm3, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm3
shl rdx, 6
mov qword ptr [rsp+0x2A0], rdx
cmp rsi, 8
jc 3f
2:
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x2A0]
cmove eax, ebx
mov dword ptr [rsp+0x200], eax
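// Transpose the current 64-byte block of all eight inputs into sixteen
// 8-lane word vectors at rsp+0x00..0x1E0 (one vector per message word)
// using vinsertf128 + vunpck{l,h}pd + vshufps, prefetching the next
// blocks along the way.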
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x20], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x40], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x60], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x80], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0xA0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0xC0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0xE0], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x100], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x120], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x140], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x160], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x180], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x1A0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x1C0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x1E0], ymm11
vpbroadcastd ymm15, dword ptr [rsp+0x200]
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
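// Round 1, column step. The fourth state row (counter lo/hi, block
// length, flags) is folded in with the first XOR rather than being
// materialized separately, and one row is spilled to rsp+0x200 since
// the full wide state plus temporaries exceeds sixteen ymm registers.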
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm0, ymmword ptr [rsp+0x240]
vpxor ymm13, ymm1, ymmword ptr [rsp+0x260]
vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip]
vpxor ymm15, ymm3, ymm15
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip]
vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip]
vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip]
vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip]
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
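# From here the feed-forward (v[i] ^= v[i+8]) is interleaved with the last
# diagonal rotations to finish the compression for all eight inputs.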
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
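# Loop back while blocks remain in the chunk; movzx reloads the base flags
# without touching the flags register, so jne still sees the chunk-length
# compare performed when this block's flags were chosen earlier in the loop.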
movzx eax, byte ptr [rbp+0x38]
jne 9b
mov rbx, qword ptr [rbp+0x50]
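# Transpose: each ymm currently holds one state word across the eight
# inputs; rearrange into eight contiguous 32-byte output values at [rbx].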
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
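# Advance the per-lane block counters: add the step vector kept at
# [rsp+0x220] into the low counter lanes at [rsp+0x240], bias both sides by
# CMP_MSB_MASK so the signed vpcmpgtd detects lanes where the unsigned add
# wrapped, then subtract that all-ones wrap mask from the high lanes at
# [rsp+0x260] (i.e. add the carry).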
vmovdqa ymm0, ymmword ptr [rsp+0x220]
vpaddd ymm1, ymm0, ymmword ptr [rsp+0x240]
vmovdqa ymmword ptr [rsp+0x240], ymm1
vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm0, ymm2
vmovdqa ymm0, ymmword ptr [rsp+0x260]
vpsubd ymm2, ymm0, ymm2
vmovdqa ymmword ptr [rsp+0x260], ymm2
add rdi, 64
add rbx, 256
mov qword ptr [rbp+0x50], rbx
sub rsi, 8
cmp rsi, 8
jnc 2b
test rsi, rsi
jnz 3f
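# Common exit: restore the caller's stack and callee-saved registers.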
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
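# Fewer than eight inputs remain: handle them with 4-, 2- and 1-wide paths.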
.p2align 5
3:
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x2A0]
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
test rsi, 0x4
je 3f
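# Four inputs: process two per ymm register, one whole input state in each
# 128-bit lane (the key words are duplicated with vbroadcasti128).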
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovdqa ymm8, ymm0
vmovdqa ymm9, ymm1
vbroadcasti128 ymm12, xmmword ptr [rsp+0x240]
vbroadcasti128 ymm13, xmmword ptr [rsp+0x260]
vpunpckldq ymm14, ymm12, ymm13
vpunpckhdq ymm15, ymm12, ymm13
vpermq ymm14, ymm14, 0x50
vpermq ymm15, ymm15, 0x50
vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
vpblendd ymm14, ymm14, ymm12, 0x44
vpblendd ymm15, ymm15, ymm12, 0x44
vmovdqa ymmword ptr [rsp], ymm14
vmovdqa ymmword ptr [rsp+0x20], ymm15
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
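# eax carries the block flags: the start-of-chunk bits from [rbp+0x40] are
# dropped after the first block (eax is reset from r13d at the bottom of the
# loop), and the end-of-chunk bits in r12d are OR'd in only once rdx reaches
# the chunk length in r15.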
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vmovups ymm2, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm2, ymm3, 136
vshufps ymm5, ymm2, ymm3, 221
vmovups ymm2, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm2, ymm3, 136
vshufps ymm7, ymm2, ymm3, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
vmovups ymm10, ymmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01
vshufps ymm12, ymm10, ymm11, 136
vshufps ymm13, ymm10, ymm11, 221
vmovups ymm10, ymmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01
vshufps ymm14, ymm10, ymm11, 136
vshufps ymm15, ymm10, ymm11, 221
vpshufd ymm14, ymm14, 0x93
vpshufd ymm15, ymm15, 0x93
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
vpbroadcastd ymm2, dword ptr [rsp+0x200]
vmovdqa ymm3, ymmword ptr [rsp]
vmovdqa ymm11, ymmword ptr [rsp+0x20]
vpblendd ymm3, ymm3, ymm2, 0x88
vpblendd ymm11, ymm11, ymm2, 0x88
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa ymm10, ymm2
mov al, 7
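# al counts the seven compression rounds of the loop below.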
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm8, ymm8, ymm12
vmovdqa ymmword ptr [rsp+0x40], ymm4
nop
vmovdqa ymmword ptr [rsp+0x60], ymm12
nop
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm5
vpaddd ymm8, ymm8, ymm13
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vmovdqa ymmword ptr [rsp+0x80], ymm5
vmovdqa ymmword ptr [rsp+0xA0], ymm13
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x93
vpshufd ymm8, ymm8, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x39
vpshufd ymm10, ymm10, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm8, ymm8, ymm14
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm7
vpaddd ymm8, ymm8, ymm15
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x39
vpshufd ymm8, ymm8, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x93
vpshufd ymm10, ymm10, 0x93
dec al
je 9f
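# Not the last round: apply the BLAKE3 message permutation
# (2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8) in registers via
# shuffles and blends; the 2- and 1-wide paths below repeat the same dance.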
vmovdqa ymm4, ymmword ptr [rsp+0x40]
vmovdqa ymm5, ymmword ptr [rsp+0x80]
vshufps ymm12, ymm4, ymm5, 214
vpshufd ymm13, ymm4, 0x0F
vpshufd ymm4, ymm12, 0x39
vshufps ymm12, ymm6, ymm7, 250
vpblendd ymm13, ymm13, ymm12, 0xAA
vpunpcklqdq ymm12, ymm7, ymm5
vpblendd ymm12, ymm12, ymm6, 0x88
vpshufd ymm12, ymm12, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymmword ptr [rsp+0x40], ymm13
vmovdqa ymmword ptr [rsp+0x80], ymm12
vmovdqa ymm12, ymmword ptr [rsp+0x60]
vmovdqa ymm13, ymmword ptr [rsp+0xA0]
vshufps ymm5, ymm12, ymm13, 214
vpshufd ymm6, ymm12, 0x0F
vpshufd ymm12, ymm5, 0x39
vshufps ymm5, ymm14, ymm15, 250
vpblendd ymm6, ymm6, ymm5, 0xAA
vpunpcklqdq ymm5, ymm15, ymm13
vpblendd ymm5, ymm5, ymm14, 0x88
vpshufd ymm5, ymm5, 0x78
vpunpckhdq ymm13, ymm13, ymm15
vpunpckldq ymm14, ymm14, ymm13
vpshufd ymm15, ymm14, 0x1E
vmovdqa ymm13, ymm6
vmovdqa ymm14, ymm5
vmovdqa ymm5, ymmword ptr [rsp+0x40]
vmovdqa ymm6, ymmword ptr [rsp+0x80]
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
vpxor ymm8, ymm8, ymm10
vpxor ymm9, ymm9, ymm11
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqu xmmword ptr [rbx+0x40], xmm8
vmovdqu xmmword ptr [rbx+0x50], xmm9
vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01
vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01
vmovaps xmm8, xmmword ptr [rsp+0x280]
vmovaps xmm0, xmmword ptr [rsp+0x240]
vmovaps xmm1, xmmword ptr [rsp+0x250]
vmovaps xmm2, xmmword ptr [rsp+0x260]
vmovaps xmm3, xmmword ptr [rsp+0x270]
vblendvps xmm0, xmm0, xmm1, xmm8
vblendvps xmm2, xmm2, xmm3, xmm8
vmovaps xmmword ptr [rsp+0x240], xmm0
vmovaps xmmword ptr [rsp+0x260], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test rsi, 0x2
je 3f
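# Two inputs: one input state per 128-bit lane of a single register set.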
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp+0x240]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x244]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x264], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
vbroadcasti128 ymm14, xmmword ptr [ROT16+rip]
vbroadcasti128 ymm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x200]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovaps ymm8, ymmword ptr [rsp+0x280]
vmovaps ymm0, ymmword ptr [rsp+0x240]
vmovups ymm1, ymmword ptr [rsp+0x248]
vmovaps ymm2, ymmword ptr [rsp+0x260]
vmovups ymm3, ymmword ptr [rsp+0x268]
vblendvps ymm0, ymm0, ymm1, ymm8
vblendvps ymm2, ymm2, ymm3, ymm8
vmovaps ymmword ptr [rsp+0x240], ymm0
vmovaps ymmword ptr [rsp+0x260], ymm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test rsi, 0x1
je 4b
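# One input: plain 128-bit xmm path, one block at a time.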
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm3, dword ptr [rsp+0x240]
vpinsrd xmm3, xmm3, dword ptr [rsp+0x260], 1
vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm14, xmmword ptr [ROT16+rip]
vmovdqa xmm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa xmm3, xmm13
vpinsrd xmm3, xmm3, eax, 3
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
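# Constant table (64-byte aligned):
#   ADD0/ADD1        per-lane counter offsets 0..7 and the +8 step
#   BLAKE3_IV_0..3   the first four IV words, broadcast across all lanes
#   BLAKE3_BLOCK_LEN 64, the compression block length, per lane
#   ROT16/ROT8       vpshufb masks rotating each 32-bit word right by 16/8
#   CMP_MSB_MASK     sign-bit bias so vpcmpgtd can act as an unsigned compare
#   BLAKE3_IV        the IV as a single 128-bit row for vbroadcasti128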
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
ADD1:
.long 8, 8, 8, 8, 8, 8, 8, 8
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
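# The two masks above implement the rotations by 16 and 8 in the G mixing
# step.  A minimal scalar sketch of that step for one lane (names here are
# illustrative, not taken from this file):
/*
 * #include <stdint.h>
 *
 * static inline uint32_t rotr32(uint32_t x, int n) {
 *     return (x >> n) | (x << (32 - n));
 * }
 *
 * // One G step: mx/my are the two message words injected into it.
 * static void g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
 *               uint32_t mx, uint32_t my) {
 *     *a += *b + mx; *d = rotr32(*d ^ *a, 16);
 *     *c += *d;      *b = rotr32(*b ^ *c, 12);
 *     *a += *b + my; *d = rotr32(*d ^ *a, 8);
 *     *c += *d;      *b = rotr32(*b ^ *c, 7);
 * }
 */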
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
aegean-odyssey/mpmd_marlin_1.1.x | 11,516 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/I2C/I2C_TwoBoards_ComIT/EWARM/startup_stm32f072xb.s
aenu1/aps3e | 289,771 | app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/sha512_asm.S
/* sha512_asm.S */
/*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef HAVE_INTEL_AVX1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
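# K[0..79]: the SHA-512 round constants from FIPS 180-4, two per line.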
L_avx1_sha512_k:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xfc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x6ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x6f067aa72176fba,0xa637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
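# vpshufb mask that byte-swaps each 64-bit lane: message words arrive
# big-endian and must be converted before scheduling.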
L_avx1_sha512_flip_mask:
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX1
.type Transform_Sha512_AVX1,@function
.align 16
Transform_Sha512_AVX1:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX1
.p2align 4
_Transform_Sha512_AVX1:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x88, %rsp
leaq 64(%rdi), %rax
vmovdqa L_avx1_sha512_flip_mask(%rip), %xmm14
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
vmovdqu (%rax), %xmm0
vmovdqu 16(%rax), %xmm1
vpshufb %xmm14, %xmm0, %xmm0
vpshufb %xmm14, %xmm1, %xmm1
vmovdqu 32(%rax), %xmm2
vmovdqu 48(%rax), %xmm3
vpshufb %xmm14, %xmm2, %xmm2
vpshufb %xmm14, %xmm3, %xmm3
vmovdqu 64(%rax), %xmm4
vmovdqu 80(%rax), %xmm5
vpshufb %xmm14, %xmm4, %xmm4
vpshufb %xmm14, %xmm5, %xmm5
vmovdqu 96(%rax), %xmm6
vmovdqu 112(%rax), %xmm7
vpshufb %xmm14, %xmm6, %xmm6
vpshufb %xmm14, %xmm7, %xmm7
movl $4, 128(%rsp)
leaq L_avx1_sha512_k(%rip), %rsi
movq %r9, %rbx
movq %r12, %rax
xorq %r10, %rbx
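# Round layout: four passes of 16 rounds interleave the scalar compression
# with the AVX message schedule (rounds 0-63); the rnd_all_2 tail runs
# rounds 64-79 with no further scheduling.  rax builds Sigma1(e) with the
# chained rorq 23/4/14 (= ror 14 ^ ror 18 ^ ror 41), rcx builds Sigma0(a)
# with rorq 5/6/28, and rbx/rdx alternate holding the previous round's x^y
# so Maj(a,b,c) = ((b^c) & (a^b)) ^ b costs one and plus one xor per round.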
# Start of 16 rounds
L_sha512_avx1_start:
vpaddq (%rsi), %xmm0, %xmm8
vpaddq 16(%rsi), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rsi), %xmm2, %xmm8
vpaddq 48(%rsi), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rsi), %xmm4, %xmm8
vpaddq 80(%rsi), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rsi), %xmm6, %xmm8
vpaddq 112(%rsi), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
addq $0x80, %rsi
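# Each msg_sched block computes two schedule words
#   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
# (sigma0: ror 1 ^ ror 8 ^ shr 7; sigma1: ror 19 ^ ror 61 ^ shr 6) in xmm
# registers while the interleaved rnd_0/rnd_1 lines run two scalar rounds,
# hiding the vector latency behind the integer work.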
# msg_sched: 0-1
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm0, %xmm1, %xmm12
vpalignr $8, %xmm4, %xmm5, %xmm13
# rnd_0: 1 - 1
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm0, %xmm13, %xmm0
# rnd_0: 10 - 11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm0, %xmm8, %xmm0
# rnd_1: 1 - 1
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rsp), %r14
xorq %r13, %rcx
vpsrlq $19, %xmm7, %xmm8
vpsllq $45, %xmm7, %xmm9
# rnd_1: 2 - 3
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %xmm7, %xmm10
vpsllq $3, %xmm7, %xmm11
# rnd_1: 4 - 6
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm7, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %xmm0, %xmm8, %xmm0
# msg_sched done: 0-3
# msg_sched: 2-3
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm1, %xmm2, %xmm12
vpalignr $8, %xmm5, %xmm6, %xmm13
# rnd_0: 1 - 1
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm1, %xmm13, %xmm1
# rnd_0: 10 - 11
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm1, %xmm8, %xmm1
# rnd_1: 1 - 1
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
vpsrlq $19, %xmm0, %xmm8
vpsllq $45, %xmm0, %xmm9
# rnd_1: 2 - 3
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %xmm0, %xmm10
vpsllq $3, %xmm0, %xmm11
# rnd_1: 4 - 6
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm0, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %xmm1, %xmm8, %xmm1
# msg_sched done: 2-5
# msg_sched: 4-5
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm2, %xmm3, %xmm12
vpalignr $8, %xmm6, %xmm7, %xmm13
# rnd_0: 1 - 1
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm2, %xmm13, %xmm2
# rnd_0: 10 - 11
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm2, %xmm8, %xmm2
# rnd_1: 1 - 1
movq %r11, %rbx
movq %r8, %rcx
addq 40(%rsp), %r10
xorq %r9, %rcx
vpsrlq $19, %xmm1, %xmm8
vpsllq $45, %xmm1, %xmm9
# rnd_1: 2 - 3
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %xmm1, %xmm10
vpsllq $3, %xmm1, %xmm11
# rnd_1: 4 - 6
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm1, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %xmm2, %xmm8, %xmm2
# msg_sched done: 4-7
# msg_sched: 6-7
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm3, %xmm4, %xmm12
vpalignr $8, %xmm7, %xmm0, %xmm13
# rnd_0: 1 - 1
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm3, %xmm13, %xmm3
# rnd_0: 10 - 11
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm3, %xmm8, %xmm3
# rnd_1: 1 - 1
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
vpsrlq $19, %xmm2, %xmm8
vpsllq $45, %xmm2, %xmm9
# rnd_1: 2 - 3
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %xmm2, %xmm10
vpsllq $3, %xmm2, %xmm11
# rnd_1: 4 - 6
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm2, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %xmm3, %xmm8, %xmm3
# msg_sched done: 6-9
# msg_sched: 8-9
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm4, %xmm5, %xmm12
vpalignr $8, %xmm0, %xmm1, %xmm13
# rnd_0: 1 - 1
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm4, %xmm13, %xmm4
# rnd_0: 10 - 11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm4, %xmm8, %xmm4
# rnd_1: 1 - 1
movq %r15, %rbx
movq %r12, %rcx
addq 72(%rsp), %r14
xorq %r13, %rcx
vpsrlq $19, %xmm3, %xmm8
vpsllq $45, %xmm3, %xmm9
# rnd_1: 2 - 3
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %xmm3, %xmm10
vpsllq $3, %xmm3, %xmm11
# rnd_1: 4 - 6
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm3, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %xmm4, %xmm8, %xmm4
# msg_sched done: 8-11
# msg_sched: 10-11
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm5, %xmm6, %xmm12
vpalignr $8, %xmm1, %xmm2, %xmm13
# rnd_0: 1 - 1
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm5, %xmm13, %xmm5
# rnd_0: 10 - 11
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm5, %xmm8, %xmm5
# rnd_1: 1 - 1
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
vpsrlq $19, %xmm4, %xmm8
vpsllq $45, %xmm4, %xmm9
# rnd_1: 2 - 3
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %xmm4, %xmm10
vpsllq $3, %xmm4, %xmm11
# rnd_1: 4 - 6
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm4, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %xmm5, %xmm8, %xmm5
# msg_sched done: 10-13
# msg_sched: 12-13
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm6, %xmm7, %xmm12
vpalignr $8, %xmm2, %xmm3, %xmm13
# rnd_0: 1 - 1
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm6, %xmm13, %xmm6
# rnd_0: 10 - 11
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm6, %xmm8, %xmm6
# rnd_1: 1 - 1
movq %r11, %rbx
movq %r8, %rcx
addq 104(%rsp), %r10
xorq %r9, %rcx
vpsrlq $19, %xmm5, %xmm8
vpsllq $45, %xmm5, %xmm9
# rnd_1: 2 - 3
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %xmm5, %xmm10
vpsllq $3, %xmm5, %xmm11
# rnd_1: 4 - 6
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm5, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %xmm6, %xmm8, %xmm6
# msg_sched done: 12-15
# msg_sched: 14-15
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm7, %xmm0, %xmm12
vpalignr $8, %xmm3, %xmm4, %xmm13
# rnd_0: 1 - 1
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm7, %xmm13, %xmm7
# rnd_0: 10 - 11
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm7, %xmm8, %xmm7
# rnd_1: 1 - 1
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
vpsrlq $19, %xmm6, %xmm8
vpsllq $45, %xmm6, %xmm9
# rnd_1: 2 - 3
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %xmm6, %xmm10
vpsllq $3, %xmm6, %xmm11
# rnd_1: 4 - 6
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm6, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %xmm7, %xmm8, %xmm7
# msg_sched done: 14-17
subl $0x01, 128(%rsp)
jne L_sha512_avx1_start
vpaddq (%rsi), %xmm0, %xmm8
vpaddq 16(%rsi), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rsi), %xmm2, %xmm8
vpaddq 48(%rsi), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rsi), %xmm4, %xmm8
vpaddq 80(%rsi), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rsi), %xmm6, %xmm8
vpaddq 112(%rsi), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
# rnd_all_2: 0-1
# rnd_0: 0 - 11
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 11
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
# rnd_all_2: 2-3
# rnd_0: 0 - 11
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 11
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
# rnd_all_2: 4-5
# rnd_0: 0 - 11
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 40(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
# rnd_all_2: 6-7
# rnd_0: 0 - 11
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 11
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
# rnd_all_2: 8-9
# rnd_0: 0 - 11
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 11
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 72(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
# rnd_all_2: 10-11
# rnd_0: 0 - 11
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 11
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
# rnd_all_2: 12-13
# rnd_0: 0 - 11
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 104(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
# rnd_all_2: 14-15
# rnd_0: 0 - 11
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 11
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
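# Add the working variables back into the hash state at (%rdi).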
addq %r8, (%rdi)
addq %r9, 8(%rdi)
addq %r10, 16(%rdi)
addq %r11, 24(%rdi)
addq %r12, 32(%rdi)
addq %r13, 40(%rdi)
addq %r14, 48(%rdi)
addq %r15, 56(%rdi)
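# Return 0; vzeroupper avoids AVX/SSE transition stalls and the trailing
# 'repz retq' is the two-byte return that older AMD branch predictors prefer.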
xorq %rax, %rax
vzeroupper
addq $0x88, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX1,.-Transform_Sha512_AVX1
#endif /* __APPLE__ */
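# Transform_Sha512_AVX1_Len: process a whole number of 128-byte blocks
# with AVX1. As read from the code below: rdi points at the hash state
# (h[0..7] at 0(%rdi)..56(%rdi), data pointer at 224(%rdi)) and rsi holds
# the byte count, a multiple of 0x80.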
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX1_Len
.type Transform_Sha512_AVX1_Len,@function
.align 16
Transform_Sha512_AVX1_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX1_Len
.p2align 4
_Transform_Sha512_AVX1_Len:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rsi, %rbp
subq $0x90, %rsp
movq 224(%rdi), %rsi
leaq L_avx1_sha512_k(%rip), %rdx
vmovdqa L_avx1_sha512_flip_mask(%rip), %xmm14
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
# Start of loop processing a block
L_sha512_len_avx1_begin:
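# Load the 128-byte block into xmm0-xmm7 and byte-swap each 64-bit word
# to big-endian with the flip mask in xmm14.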
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm14, %xmm0, %xmm0
vpshufb %xmm14, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm14, %xmm2, %xmm2
vpshufb %xmm14, %xmm3, %xmm3
vmovdqu 64(%rsi), %xmm4
vmovdqu 80(%rsi), %xmm5
vpshufb %xmm14, %xmm4, %xmm4
vpshufb %xmm14, %xmm5, %xmm5
vmovdqu 96(%rsi), %xmm6
vmovdqu 112(%rsi), %xmm7
vpshufb %xmm14, %xmm6, %xmm6
vpshufb %xmm14, %xmm7, %xmm7
movl $4, 128(%rsp)
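# Four scheduled 16-round passes (64 rounds) plus the final 16 rounds
# below give the 80 rounds of SHA-512.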
movq %r9, %rbx
movq %r12, %rax
xorq %r10, %rbx
vpaddq (%rdx), %xmm0, %xmm8
vpaddq 16(%rdx), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rdx), %xmm2, %xmm8
vpaddq 48(%rdx), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rdx), %xmm4, %xmm8
vpaddq 80(%rdx), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rdx), %xmm6, %xmm8
vpaddq 112(%rdx), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
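# w[i] + K[i] for i = 0..15 staged at (%rsp)..112(%rsp) for the upcoming
# 16 rounds.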
# Start of 16 rounds
L_sha512_len_avx1_start:
addq $0x80, %rdx
movq %rdx, 136(%rsp)
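# The K pointer (rdx) is spilled to 136(%rsp) because rdx doubles as a
# round temporary. Each msg_sched block advances two message words:
#   w[i] += sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15])
# AVX1 has no 64-bit vector rotate, so every ROTR is a vpsrlq/vpsllq
# pair merged with vpor.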
# msg_sched: 0-1
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm0, %xmm1, %xmm12
vpalignr $8, %xmm4, %xmm5, %xmm13
# rnd_0: 1 - 1
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm0, %xmm13, %xmm0
# rnd_0: 10 - 11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm0, %xmm8, %xmm0
# rnd_1: 1 - 1
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rsp), %r14
xorq %r13, %rcx
vpsrlq $19, %xmm7, %xmm8
vpsllq $45, %xmm7, %xmm9
# rnd_1: 2 - 3
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %xmm7, %xmm10
vpsllq $3, %xmm7, %xmm11
# rnd_1: 4 - 6
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm7, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %xmm0, %xmm8, %xmm0
# msg_sched done: 0-3
# msg_sched: 2-3
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm1, %xmm2, %xmm12
vpalignr $8, %xmm5, %xmm6, %xmm13
# rnd_0: 1 - 1
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm1, %xmm13, %xmm1
# rnd_0: 10 - 11
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm1, %xmm8, %xmm1
# rnd_1: 1 - 1
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
vpsrlq $19, %xmm0, %xmm8
vpsllq $45, %xmm0, %xmm9
# rnd_1: 2 - 3
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %xmm0, %xmm10
vpsllq $3, %xmm0, %xmm11
# rnd_1: 4 - 6
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm0, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %xmm1, %xmm8, %xmm1
# msg_sched done: 2-5
# msg_sched: 4-5
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm2, %xmm3, %xmm12
vpalignr $8, %xmm6, %xmm7, %xmm13
# rnd_0: 1 - 1
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm2, %xmm13, %xmm2
# rnd_0: 10 - 11
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm2, %xmm8, %xmm2
# rnd_1: 1 - 1
movq %r11, %rbx
movq %r8, %rcx
addq 40(%rsp), %r10
xorq %r9, %rcx
vpsrlq $19, %xmm1, %xmm8
vpsllq $45, %xmm1, %xmm9
# rnd_1: 2 - 3
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %xmm1, %xmm10
vpsllq $3, %xmm1, %xmm11
# rnd_1: 4 - 6
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm1, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %xmm2, %xmm8, %xmm2
# msg_sched done: 4-7
# msg_sched: 6-7
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm3, %xmm4, %xmm12
vpalignr $8, %xmm7, %xmm0, %xmm13
# rnd_0: 1 - 1
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm3, %xmm13, %xmm3
# rnd_0: 10 - 11
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm3, %xmm8, %xmm3
# rnd_1: 1 - 1
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
vpsrlq $19, %xmm2, %xmm8
vpsllq $45, %xmm2, %xmm9
# rnd_1: 2 - 3
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %xmm2, %xmm10
vpsllq $3, %xmm2, %xmm11
# rnd_1: 4 - 6
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm2, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %xmm3, %xmm8, %xmm3
# msg_sched done: 6-9
# msg_sched: 8-9
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm4, %xmm5, %xmm12
vpalignr $8, %xmm0, %xmm1, %xmm13
# rnd_0: 1 - 1
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm4, %xmm13, %xmm4
# rnd_0: 10 - 11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm4, %xmm8, %xmm4
# rnd_1: 1 - 1
movq %r15, %rbx
movq %r12, %rcx
addq 72(%rsp), %r14
xorq %r13, %rcx
vpsrlq $19, %xmm3, %xmm8
vpsllq $45, %xmm3, %xmm9
# rnd_1: 2 - 3
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %xmm3, %xmm10
vpsllq $3, %xmm3, %xmm11
# rnd_1: 4 - 6
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm3, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %xmm4, %xmm8, %xmm4
# msg_sched done: 8-11
# msg_sched: 10-11
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm5, %xmm6, %xmm12
vpalignr $8, %xmm1, %xmm2, %xmm13
# rnd_0: 1 - 1
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm5, %xmm13, %xmm5
# rnd_0: 10 - 11
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm5, %xmm8, %xmm5
# rnd_1: 1 - 1
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
vpsrlq $19, %xmm4, %xmm8
vpsllq $45, %xmm4, %xmm9
# rnd_1: 2 - 3
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %xmm4, %xmm10
vpsllq $3, %xmm4, %xmm11
# rnd_1: 4 - 6
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm4, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %xmm5, %xmm8, %xmm5
# msg_sched done: 10-13
# msg_sched: 12-13
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm6, %xmm7, %xmm12
vpalignr $8, %xmm2, %xmm3, %xmm13
# rnd_0: 1 - 1
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm6, %xmm13, %xmm6
# rnd_0: 10 - 11
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm6, %xmm8, %xmm6
# rnd_1: 1 - 1
movq %r11, %rbx
movq %r8, %rcx
addq 104(%rsp), %r10
xorq %r9, %rcx
vpsrlq $19, %xmm5, %xmm8
vpsllq $45, %xmm5, %xmm9
# rnd_1: 2 - 3
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %xmm5, %xmm10
vpsllq $3, %xmm5, %xmm11
# rnd_1: 4 - 6
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm5, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %xmm6, %xmm8, %xmm6
# msg_sched done: 12-15
# msg_sched: 14-15
# rnd_0: 0 - 0
rorq $23, %rax
vpalignr $8, %xmm7, %xmm0, %xmm12
vpalignr $8, %xmm3, %xmm4, %xmm13
# rnd_0: 1 - 1
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 3
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 4 - 5
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 6 - 7
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 8 - 9
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm7, %xmm13, %xmm7
# rnd_0: 10 - 11
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 0
rorq $23, %rax
vpaddq %xmm7, %xmm8, %xmm7
# rnd_1: 1 - 1
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
vpsrlq $19, %xmm6, %xmm8
vpsllq $45, %xmm6, %xmm9
# rnd_1: 2 - 3
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %xmm6, %xmm10
vpsllq $3, %xmm6, %xmm11
# rnd_1: 4 - 6
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 7 - 8
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm6, %xmm11
# rnd_1: 9 - 10
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 11 - 11
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %xmm7, %xmm8, %xmm7
# msg_sched done: 14-17
movq 136(%rsp), %rdx
vpaddq (%rdx), %xmm0, %xmm8
vpaddq 16(%rdx), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rdx), %xmm2, %xmm8
vpaddq 48(%rdx), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rdx), %xmm4, %xmm8
vpaddq 80(%rdx), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rdx), %xmm6, %xmm8
vpaddq 112(%rdx), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
subl $0x01, 128(%rsp)
jne L_sha512_len_avx1_start
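# Final 16 of the 80 rounds: consume the staged w + K values with no
# further message scheduling.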
# rnd_all_2: 0-1
# rnd_0: 0 - 11
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 11
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
# rnd_all_2: 2-3
# rnd_0: 0 - 11
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 11
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
# rnd_all_2: 4-5
# rnd_0: 0 - 11
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 40(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
# rnd_all_2: 6-7
# rnd_0: 0 - 11
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 11
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
# rnd_all_2: 8-9
# rnd_0: 0 - 11
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
# rnd_1: 0 - 11
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 72(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
# rnd_all_2: 10-11
# rnd_0: 0 - 11
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
# rnd_1: 0 - 11
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
# rnd_all_2: 12-13
# rnd_0: 0 - 11
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
# rnd_1: 0 - 11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 104(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
# rnd_all_2: 14-15
# rnd_0: 0 - 11
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
# rnd_1: 0 - 11
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
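# Fold the previous hash value into the working variables, store the
# updated state, and advance to the next block.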
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
leaq L_avx1_sha512_k(%rip), %rdx
addq $0x80, %rsi
subl $0x80, %ebp
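# The movq stores below leave the flags untouched, so the jnz still
# tests the subl result (remaining byte count).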
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
jnz L_sha512_len_avx1_begin
xorq %rax, %rax
vzeroupper
addq $0x90, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX1_Len,.-Transform_Sha512_AVX1_Len
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
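# SHA-512 round constants K[0..79] (FIPS 180-4), two per row.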
L_avx1_rorx_sha512_k:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad	0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
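# vpshufb mask that reverses the bytes of each 64-bit lane
# (little- to big-endian).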
L_avx1_rorx_sha512_flip_mask:
.quad	0x0001020304050607, 0x08090a0b0c0d0e0f
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX1_RORX
.type Transform_Sha512_AVX1_RORX,@function
.align 16
Transform_Sha512_AVX1_RORX:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX1_RORX
.p2align 4
_Transform_Sha512_AVX1_RORX:
#endif /* __APPLE__ */
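# Transform_Sha512_AVX1_RORX: single-block transform using BMI2 rorx.
# As read from the code below: rdi points at the hash state, with the
# 128-byte message block buffered at 64(%rdi).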
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x88, %rsp
leaq 64(%rdi), %rax
vmovdqa L_avx1_rorx_sha512_flip_mask(%rip), %xmm14
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
vmovdqu (%rax), %xmm0
vmovdqu 16(%rax), %xmm1
vpshufb %xmm14, %xmm0, %xmm0
vpshufb %xmm14, %xmm1, %xmm1
vmovdqu 32(%rax), %xmm2
vmovdqu 48(%rax), %xmm3
vpshufb %xmm14, %xmm2, %xmm2
vpshufb %xmm14, %xmm3, %xmm3
vmovdqu 64(%rax), %xmm4
vmovdqu 80(%rax), %xmm5
vpshufb %xmm14, %xmm4, %xmm4
vpshufb %xmm14, %xmm5, %xmm5
vmovdqu 96(%rax), %xmm6
vmovdqu 112(%rax), %xmm7
vpshufb %xmm14, %xmm6, %xmm6
vpshufb %xmm14, %xmm7, %xmm7
movl $4, 128(%rsp)
leaq L_avx1_rorx_sha512_k(%rip), %rsi
movq %r9, %rbx
xorq %rdx, %rdx
xorq %r10, %rbx
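# rbx = b ^ c seeds the Maj chain; rdx carries the Maj value deferred
# from the previous round (zeroed here for round 0).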
vpaddq (%rsi), %xmm0, %xmm8
vpaddq 16(%rsi), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rsi), %xmm2, %xmm8
vpaddq 48(%rsi), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rsi), %xmm4, %xmm8
vpaddq 80(%rsi), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rsi), %xmm6, %xmm8
vpaddq 112(%rsi), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
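# Same message schedule as the AVX1 path above; the scalar rounds use
# BMI2 rorxq (three-operand, flags-untouched rotates) in place of the
# mov + rorq chains.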
# Start of 16 rounds
L_sha512_avx1_rorx_start:
addq $0x80, %rsi
# msg_sched: 0-1
# rnd_0: 0 - 0
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %xmm0, %xmm1, %xmm12
vpalignr $8, %xmm4, %xmm5, %xmm13
# rnd_0: 1 - 1
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm0, %xmm13, %xmm0
# rnd_0: 6 - 7
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %xmm0, %xmm8, %xmm0
# rnd_1: 0 - 0
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %xmm7, %xmm8
vpsllq $45, %xmm7, %xmm9
# rnd_1: 1 - 1
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm7, %xmm10
vpsllq $3, %xmm7, %xmm11
# rnd_1: 2 - 2
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm7, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %xmm0, %xmm8, %xmm0
# msg_sched done: 0-3
# msg_sched: 2-3
# rnd_0: 0 - 0
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %xmm1, %xmm2, %xmm12
vpalignr $8, %xmm5, %xmm6, %xmm13
# rnd_0: 1 - 1
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm1, %xmm13, %xmm1
# rnd_0: 6 - 7
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %xmm1, %xmm8, %xmm1
# rnd_1: 0 - 0
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %xmm0, %xmm8
vpsllq $45, %xmm0, %xmm9
# rnd_1: 1 - 1
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm0, %xmm10
vpsllq $3, %xmm0, %xmm11
# rnd_1: 2 - 2
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm0, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %xmm1, %xmm8, %xmm1
# msg_sched done: 2-5
# msg_sched: 4-5
# rnd_0: 0 - 0
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %xmm2, %xmm3, %xmm12
vpalignr $8, %xmm6, %xmm7, %xmm13
# rnd_0: 1 - 1
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm2, %xmm13, %xmm2
# rnd_0: 6 - 7
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %xmm2, %xmm8, %xmm2
# rnd_1: 0 - 0
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %xmm1, %xmm8
vpsllq $45, %xmm1, %xmm9
# rnd_1: 1 - 1
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm1, %xmm10
vpsllq $3, %xmm1, %xmm11
# rnd_1: 2 - 2
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm1, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %xmm2, %xmm8, %xmm2
# msg_sched done: 4-7
# msg_sched: 6-7
# rnd_0: 0 - 0
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %xmm3, %xmm4, %xmm12
vpalignr $8, %xmm7, %xmm0, %xmm13
# rnd_0: 1 - 1
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm3, %xmm13, %xmm3
# rnd_0: 6 - 7
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %xmm3, %xmm8, %xmm3
# rnd_1: 0 - 0
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %xmm2, %xmm8
vpsllq $45, %xmm2, %xmm9
# rnd_1: 1 - 1
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm2, %xmm10
vpsllq $3, %xmm2, %xmm11
# rnd_1: 2 - 2
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm2, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %xmm3, %xmm8, %xmm3
# msg_sched done: 6-9
# msg_sched: 8-9
# rnd_0: 0 - 0
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %xmm4, %xmm5, %xmm12
vpalignr $8, %xmm0, %xmm1, %xmm13
# rnd_0: 1 - 1
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm4, %xmm13, %xmm4
# rnd_0: 6 - 7
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %xmm4, %xmm8, %xmm4
# rnd_1: 0 - 0
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %xmm3, %xmm8
vpsllq $45, %xmm3, %xmm9
# rnd_1: 1 - 1
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm3, %xmm10
vpsllq $3, %xmm3, %xmm11
# rnd_1: 2 - 2
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm3, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %xmm4, %xmm8, %xmm4
# msg_sched done: 8-11
# msg_sched: 10-11
# rnd_0: 0 - 0
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %xmm5, %xmm6, %xmm12
vpalignr $8, %xmm1, %xmm2, %xmm13
# rnd_0: 1 - 1
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm5, %xmm13, %xmm5
# rnd_0: 6 - 7
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %xmm5, %xmm8, %xmm5
# rnd_1: 0 - 0
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %xmm4, %xmm8
vpsllq $45, %xmm4, %xmm9
# rnd_1: 1 - 1
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm4, %xmm10
vpsllq $3, %xmm4, %xmm11
# rnd_1: 2 - 2
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm4, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %xmm5, %xmm8, %xmm5
# msg_sched done: 10-13
# msg_sched: 12-13
# rnd_0: 0 - 0
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %xmm6, %xmm7, %xmm12
vpalignr $8, %xmm2, %xmm3, %xmm13
# rnd_0: 1 - 1
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm6, %xmm13, %xmm6
# rnd_0: 6 - 7
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %xmm6, %xmm8, %xmm6
# rnd_1: 0 - 0
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %xmm5, %xmm8
vpsllq $45, %xmm5, %xmm9
# rnd_1: 1 - 1
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm5, %xmm10
vpsllq $3, %xmm5, %xmm11
# rnd_1: 2 - 2
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm5, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %xmm6, %xmm8, %xmm6
# msg_sched done: 12-15
# msg_sched: 14-15
# rnd_0: 0 - 0
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %xmm7, %xmm0, %xmm12
vpalignr $8, %xmm3, %xmm4, %xmm13
# rnd_0: 1 - 1
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm7, %xmm13, %xmm7
# rnd_0: 6 - 7
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %xmm7, %xmm8, %xmm7
# rnd_1: 0 - 0
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %xmm6, %xmm8
vpsllq $45, %xmm6, %xmm9
# rnd_1: 1 - 1
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm6, %xmm10
vpsllq $3, %xmm6, %xmm11
# rnd_1: 2 - 2
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm6, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %xmm7, %xmm8, %xmm7
# msg_sched done: 14-17
vpaddq (%rsi), %xmm0, %xmm8
vpaddq 16(%rsi), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rsi), %xmm2, %xmm8
vpaddq 48(%rsi), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rsi), %xmm4, %xmm8
vpaddq 80(%rsi), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rsi), %xmm6, %xmm8
vpaddq 112(%rsi), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
subl $0x01, 128(%rsp)
jne	L_sha512_avx1_rorx_start
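# Final 16 rounds, as in the non-RORX path: no further message scheduling.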
# rnd_all_2: 0-1
# rnd_0: 0 - 7
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
# rnd_1: 0 - 7
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 2-3
# rnd_0: 0 - 7
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
# rnd_1: 0 - 7
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 4-5
# rnd_0: 0 - 7
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
# rnd_1: 0 - 7
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 6-7
# rnd_0: 0 - 7
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
# rnd_1: 0 - 7
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
# rnd_all_2: 8-9
# rnd_0: 0 - 7
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
# rnd_1: 0 - 7
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 10-11
# rnd_0: 0 - 7
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
# rnd_1: 0 - 7
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 12-13
# rnd_0: 0 - 7
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
# rnd_1: 0 - 7
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 14-15
# rnd_0: 0 - 7
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
# rnd_1: 0 - 7
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
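# Flush the Maj value deferred from the last round, then add the working
# variables into the hash state.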
addq %rdx, %r8
addq %r8, (%rdi)
addq %r9, 8(%rdi)
addq %r10, 16(%rdi)
addq %r11, 24(%rdi)
addq %r12, 32(%rdi)
addq %r13, 40(%rdi)
addq %r14, 48(%rdi)
addq %r15, 56(%rdi)
xorq %rax, %rax
vzeroupper
addq $0x88, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX1_RORX,.-Transform_Sha512_AVX1_RORX
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX1_RORX_Len
.type Transform_Sha512_AVX1_RORX_Len,@function
.align 16
Transform_Sha512_AVX1_RORX_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX1_RORX_Len
.p2align 4
_Transform_Sha512_AVX1_RORX_Len:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rsi, %rbp
subq $0x90, %rsp
movq 224(%rdi), %rsi
leaq L_avx1_rorx_sha512_k(%rip), %rcx
vmovdqa L_avx1_rorx_sha512_flip_mask(%rip), %xmm14
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
# Start of loop processing a block
L_sha512_len_avx1_rorx_begin:
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm14, %xmm0, %xmm0
vpshufb %xmm14, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm14, %xmm2, %xmm2
vpshufb %xmm14, %xmm3, %xmm3
vmovdqu 64(%rsi), %xmm4
vmovdqu 80(%rsi), %xmm5
vpshufb %xmm14, %xmm4, %xmm4
vpshufb %xmm14, %xmm5, %xmm5
vmovdqu 96(%rsi), %xmm6
vmovdqu 112(%rsi), %xmm7
vpshufb %xmm14, %xmm6, %xmm6
vpshufb %xmm14, %xmm7, %xmm7
movl $4, 128(%rsp)
movq %r9, %rbx
xorq %rdx, %rdx
xorq %r10, %rbx
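# As above: rbx = b ^ c, rdx = deferred Maj (initially zero).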
vpaddq (%rcx), %xmm0, %xmm8
vpaddq 16(%rcx), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rcx), %xmm2, %xmm8
vpaddq 48(%rcx), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rcx), %xmm4, %xmm8
vpaddq 80(%rcx), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rcx), %xmm6, %xmm8
vpaddq 112(%rcx), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
# Start of 16 rounds
L_sha512_len_avx1_rorx_start:
addq $0x80, %rcx
movq %rcx, 136(%rsp)
# msg_sched: 0-1
# rnd_0: 0 - 0
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %xmm0, %xmm1, %xmm12
vpalignr $8, %xmm4, %xmm5, %xmm13
# rnd_0: 1 - 1
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm0, %xmm13, %xmm0
# rnd_0: 6 - 7
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %xmm0, %xmm8, %xmm0
# rnd_1: 0 - 0
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %xmm7, %xmm8
vpsllq $45, %xmm7, %xmm9
# rnd_1: 1 - 1
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm7, %xmm10
vpsllq $3, %xmm7, %xmm11
# rnd_1: 2 - 2
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm7, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %xmm0, %xmm8, %xmm0
# msg_sched done: 0-3
# msg_sched: 2-3
# rnd_0: 0 - 0
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %xmm1, %xmm2, %xmm12
vpalignr $8, %xmm5, %xmm6, %xmm13
# rnd_0: 1 - 1
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm1, %xmm13, %xmm1
# rnd_0: 6 - 7
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %xmm1, %xmm8, %xmm1
# rnd_1: 0 - 0
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %xmm0, %xmm8
vpsllq $45, %xmm0, %xmm9
# rnd_1: 1 - 1
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm0, %xmm10
vpsllq $3, %xmm0, %xmm11
# rnd_1: 2 - 2
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm0, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %xmm1, %xmm8, %xmm1
# msg_sched done: 2-5
# msg_sched: 4-5
# rnd_0: 0 - 0
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %xmm2, %xmm3, %xmm12
vpalignr $8, %xmm6, %xmm7, %xmm13
# rnd_0: 1 - 1
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm2, %xmm13, %xmm2
# rnd_0: 6 - 7
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %xmm2, %xmm8, %xmm2
# rnd_1: 0 - 0
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %xmm1, %xmm8
vpsllq $45, %xmm1, %xmm9
# rnd_1: 1 - 1
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm1, %xmm10
vpsllq $3, %xmm1, %xmm11
# rnd_1: 2 - 2
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm1, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %xmm2, %xmm8, %xmm2
# msg_sched done: 4-7
# msg_sched: 6-7
# rnd_0: 0 - 0
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %xmm3, %xmm4, %xmm12
vpalignr $8, %xmm7, %xmm0, %xmm13
# rnd_0: 1 - 1
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm3, %xmm13, %xmm3
# rnd_0: 6 - 7
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %xmm3, %xmm8, %xmm3
# rnd_1: 0 - 0
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %xmm2, %xmm8
vpsllq $45, %xmm2, %xmm9
# rnd_1: 1 - 1
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm2, %xmm10
vpsllq $3, %xmm2, %xmm11
# rnd_1: 2 - 2
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm2, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %xmm3, %xmm8, %xmm3
# msg_sched done: 6-9
# msg_sched: 8-9
# rnd_0: 0 - 0
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %xmm4, %xmm5, %xmm12
vpalignr $8, %xmm0, %xmm1, %xmm13
# rnd_0: 1 - 1
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm4, %xmm13, %xmm4
# rnd_0: 6 - 7
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %xmm4, %xmm8, %xmm4
# rnd_1: 0 - 0
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %xmm3, %xmm8
vpsllq $45, %xmm3, %xmm9
# rnd_1: 1 - 1
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm3, %xmm10
vpsllq $3, %xmm3, %xmm11
# rnd_1: 2 - 2
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm3, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %xmm4, %xmm8, %xmm4
# msg_sched done: 8-11
# msg_sched: 10-11
# rnd_0: 0 - 0
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %xmm5, %xmm6, %xmm12
vpalignr $8, %xmm1, %xmm2, %xmm13
# rnd_0: 1 - 1
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm5, %xmm13, %xmm5
# rnd_0: 6 - 7
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %xmm5, %xmm8, %xmm5
# rnd_1: 0 - 0
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %xmm4, %xmm8
vpsllq $45, %xmm4, %xmm9
# rnd_1: 1 - 1
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm4, %xmm10
vpsllq $3, %xmm4, %xmm11
# rnd_1: 2 - 2
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm4, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %xmm5, %xmm8, %xmm5
# msg_sched done: 10-13
# msg_sched: 12-13
# rnd_0: 0 - 0
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %xmm6, %xmm7, %xmm12
vpalignr $8, %xmm2, %xmm3, %xmm13
# rnd_0: 1 - 1
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm6, %xmm13, %xmm6
# rnd_0: 6 - 7
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %xmm6, %xmm8, %xmm6
# rnd_1: 0 - 0
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %xmm5, %xmm8
vpsllq $45, %xmm5, %xmm9
# rnd_1: 1 - 1
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm5, %xmm10
vpsllq $3, %xmm5, %xmm11
# rnd_1: 2 - 2
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm5, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %xmm6, %xmm8, %xmm6
# msg_sched done: 12-15
# msg_sched: 14-15
# rnd_0: 0 - 0
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %xmm7, %xmm0, %xmm12
vpalignr $8, %xmm3, %xmm4, %xmm13
# rnd_0: 1 - 1
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpsrlq $0x01, %xmm12, %xmm8
vpsllq $63, %xmm12, %xmm9
# rnd_0: 2 - 2
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $8, %xmm12, %xmm10
vpsllq $56, %xmm12, %xmm11
# rnd_0: 3 - 3
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_0: 4 - 4
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $7, %xmm12, %xmm11
vpxor %xmm10, %xmm8, %xmm8
# rnd_0: 5 - 5
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpxor %xmm11, %xmm8, %xmm8
vpaddq %xmm7, %xmm13, %xmm7
# rnd_0: 6 - 7
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %xmm7, %xmm8, %xmm7
# rnd_1: 0 - 0
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %xmm6, %xmm8
vpsllq $45, %xmm6, %xmm9
# rnd_1: 1 - 1
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %xmm6, %xmm10
vpsllq $3, %xmm6, %xmm11
# rnd_1: 2 - 2
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %xmm9, %xmm8, %xmm8
vpor %xmm11, %xmm10, %xmm10
# rnd_1: 3 - 4
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %xmm10, %xmm8, %xmm8
vpsrlq $6, %xmm6, %xmm11
# rnd_1: 5 - 6
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
vpxor %xmm11, %xmm8, %xmm8
# rnd_1: 7 - 7
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %xmm7, %xmm8, %xmm7
# msg_sched done: 14-17
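# Message schedule for the next 16 rounds is complete: reload the
# round-constant pointer kept at 136(%rsp), add K[t] to each W[t] and
# spill W[t]+K[t] to the stack for the round code to consume.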
movq 136(%rsp), %rcx
vpaddq (%rcx), %xmm0, %xmm8
vpaddq 16(%rcx), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rcx), %xmm2, %xmm8
vpaddq 48(%rcx), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rcx), %xmm4, %xmm8
vpaddq 80(%rcx), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rcx), %xmm6, %xmm8
vpaddq 112(%rcx), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
subl $0x01, 128(%rsp)
jne L_sha512_len_avx1_rorx_start
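# Scheduling loop done. Add the last set of round constants so the
# final 16 rounds (the rnd_all_2 blocks below, which need no further
# message expansion) run straight from precomputed W[t]+K[t] values.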
vpaddq (%rcx), %xmm0, %xmm8
vpaddq 16(%rcx), %xmm1, %xmm9
vmovdqu %xmm8, (%rsp)
vmovdqu %xmm9, 16(%rsp)
vpaddq 32(%rcx), %xmm2, %xmm8
vpaddq 48(%rcx), %xmm3, %xmm9
vmovdqu %xmm8, 32(%rsp)
vmovdqu %xmm9, 48(%rsp)
vpaddq 64(%rcx), %xmm4, %xmm8
vpaddq 80(%rcx), %xmm5, %xmm9
vmovdqu %xmm8, 64(%rsp)
vmovdqu %xmm9, 80(%rsp)
vpaddq 96(%rcx), %xmm6, %xmm8
vpaddq 112(%rcx), %xmm7, %xmm9
vmovdqu %xmm8, 96(%rsp)
vmovdqu %xmm9, 112(%rsp)
# rnd_all_2: 0-1
# rnd_0: 0 - 7
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
# rnd_1: 0 - 7
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 2-3
# rnd_0: 0 - 7
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
# rnd_1: 0 - 7
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 4-5
# rnd_0: 0 - 7
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
# rnd_1: 0 - 7
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 6-7
# rnd_0: 0 - 7
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
# rnd_1: 0 - 7
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
# rnd_all_2: 8-9
# rnd_0: 0 - 7
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
# rnd_1: 0 - 7
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
addq %r14, %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 10-11
# rnd_0: 0 - 7
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
# rnd_1: 0 - 7
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
addq %r12, %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 12-13
# rnd_0: 0 - 7
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
# rnd_1: 0 - 7
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
addq %r10, %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 14-15
# rnd_0: 0 - 7
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
# rnd_1: 0 - 7
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
addq %r8, %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
addq %rdx, %r8
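# Feed-forward: add the previous digest state to the working variables,
# store the result back, advance the data pointer by one 128-byte block
# and loop while input remains (%ebp holds the remaining byte count).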
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
leaq L_avx1_rorx_sha512_k(%rip), %rcx
addq $0x80, %rsi
subl $0x80, %ebp
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
jnz L_sha512_len_avx1_rorx_begin
xorq %rax, %rax
vzeroupper
addq $0x90, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX1_RORX_Len,.-Transform_Sha512_AVX1_RORX_Len
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
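# SHA-512 round constants K[0..79] (FIPS 180-4, section 4.2.3).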
L_avx2_sha512_k:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xfc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x6ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x6f067aa72176fba,0xa637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
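# The same 80 constants with each 128-bit pair duplicated into both
# halves of a 256-bit lane, so a single vpaddq adds K[t] for the two
# message blocks processed in parallel by Transform_Sha512_AVX2_Len.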
L_avx2_sha512_k_2:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xfc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0xfc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x6ca6351e003826f,0x142929670a0e6e70
.quad 0x6ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x6f067aa72176fba,0xa637dc5a2c898a6
.quad 0x6f067aa72176fba,0xa637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 8
#else
.p2align 3
#endif /* __APPLE__ */
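# Address of the last 256 bytes of the duplicated table: the scheduling
# loop in Transform_Sha512_AVX2_Len stops when its constant pointer
# reaches this value, and the remaining entries feed the final 16 rounds.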
L_avx2_sha512_k_2_end:
.quad 1024+L_avx2_sha512_k_2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
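# vpshufb mask that reverses the byte order of each 64-bit lane,
# converting the big-endian message words to host order on load.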
L_avx2_sha512_flip_mask:
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
.quad 0x1020304050607, 0x8090a0b0c0d0e0f
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX2
.type Transform_Sha512_AVX2,@function
.align 16
Transform_Sha512_AVX2:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX2
.p2align 4
_Transform_Sha512_AVX2:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x88, %rsp
leaq 64(%rdi), %rax
vmovdqa L_avx2_sha512_flip_mask(%rip), %ymm15
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
vmovdqu (%rax), %ymm0
vmovdqu 32(%rax), %ymm1
vpshufb %ymm15, %ymm0, %ymm0
vpshufb %ymm15, %ymm1, %ymm1
vmovdqu 64(%rax), %ymm2
vmovdqu 96(%rax), %ymm3
vpshufb %ymm15, %ymm2, %ymm2
vpshufb %ymm15, %ymm3, %ymm3
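# Loop counter: four passes of 16 rounds with message scheduling
# (rounds 0-63); the final 16 rounds (64-79) run after the loop on the
# last precomputed W[t]+K[t] set.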
movl $4, 128(%rsp)
leaq L_avx2_sha512_k(%rip), %rsi
movq %r9, %rbx
movq %r12, %rax
xorq %r10, %rbx
vpaddq (%rsi), %ymm0, %ymm8
vpaddq 32(%rsi), %ymm1, %ymm9
vmovdqu %ymm8, (%rsp)
vmovdqu %ymm9, 32(%rsp)
vpaddq 64(%rsi), %ymm2, %ymm8
vpaddq 96(%rsi), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rsp)
vmovdqu %ymm9, 96(%rsp)
# Start of 16 rounds
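# Note: the scalar rounds here build Sigma1(e) with cumulative rotates:
# rorq $23, xor e, rorq $4, xor e, rorq $14 yields
# ROTR14 ^ ROTR18 ^ ROTR41 of e; likewise rorq $5/$6/$28 on a yields
# Sigma0(a) = ROTR28 ^ ROTR34 ^ ROTR39. Ch and Maj come from the
# surrounding xor/and chains, while the ymm ops interleave the
# sigma0/sigma1 message-schedule work for the next 16 words.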
L_sha512_avx2_start:
addq $0x80, %rsi
rorq $23, %rax
vpblendd $3, %ymm1, %ymm0, %ymm12
vpblendd $3, %ymm3, %ymm2, %ymm13
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
vpermq $57, %ymm12, %ymm12
rorq $4, %rax
xorq %r14, %rcx
vpermq $57, %ymm13, %ymm13
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpsrlq $0x01, %ymm12, %ymm8
addq %rax, %r15
movq %r8, %rcx
vpsllq $63, %ymm12, %ymm9
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $8, %ymm12, %ymm10
xorq %r8, %rcx
xorq %r9, %rbx
vpsllq $56, %ymm12, %ymm11
rorq $6, %rcx
addq %r15, %r11
vpor %ymm9, %ymm8, %ymm8
xorq %r8, %rcx
addq %rbx, %r15
vpor %ymm11, %ymm10, %ymm10
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r15, %rbx
movq %r12, %rcx
vpxor %ymm10, %ymm8, %ymm8
addq 8(%rsp), %r14
xorq %r13, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r11, %rax
andq %r11, %rcx
vpaddq %ymm0, %ymm13, %ymm0
rorq $4, %rax
xorq %r13, %rcx
vpaddq %ymm0, %ymm8, %ymm0
xorq %r11, %rax
addq %rcx, %r14
vperm2I128 $0x81, %ymm3, %ymm3, %ymm14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
vpsrlq $19, %ymm14, %ymm8
xorq %r15, %rcx
xorq %r8, %rdx
vpsllq $45, %ymm14, %ymm9
rorq $6, %rcx
addq %r14, %r10
vpsrlq $61, %ymm14, %ymm10
xorq %r15, %rcx
addq %rdx, %r14
vpsllq $3, %ymm14, %ymm11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
vpor %ymm9, %ymm8, %ymm8
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r10, %rax
andq %r10, %rcx
vpxor %ymm10, %ymm8, %ymm8
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $6, %ymm14, %ymm11
xorq %r10, %rax
addq %rcx, %r13
vpxor %ymm11, %ymm8, %ymm8
rorq $14, %rax
xorq %r15, %rdx
vpaddq %ymm0, %ymm8, %ymm0
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vperm2I128 $8, %ymm0, %ymm0, %ymm14
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpsrlq $19, %ymm14, %ymm8
xorq %r14, %rcx
addq %rbx, %r13
vpsllq $45, %ymm14, %ymm9
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
vpsrlq $61, %ymm14, %ymm10
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
vpsllq $3, %ymm14, %ymm11
xorq %r9, %rax
andq %r9, %rcx
vpor %ymm9, %ymm8, %ymm8
rorq $4, %rax
xorq %r11, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r9, %rax
addq %rcx, %r12
vpxor %ymm10, %ymm8, %ymm8
rorq $14, %rax
xorq %r14, %rbx
vpsrlq $6, %ymm14, %ymm11
addq %rax, %r12
movq %r13, %rcx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
rorq $5, %rcx
vpaddq %ymm0, %ymm8, %ymm0
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
vpblendd $3, %ymm2, %ymm1, %ymm12
vpblendd $3, %ymm0, %ymm3, %ymm13
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
vpermq $57, %ymm12, %ymm12
rorq $4, %rax
xorq %r10, %rcx
vpermq $57, %ymm13, %ymm13
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpsrlq $0x01, %ymm12, %ymm8
addq %rax, %r11
movq %r12, %rcx
vpsllq $63, %ymm12, %ymm9
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $8, %ymm12, %ymm10
xorq %r12, %rcx
xorq %r13, %rbx
vpsllq $56, %ymm12, %ymm11
rorq $6, %rcx
addq %r11, %r15
vpor %ymm9, %ymm8, %ymm8
xorq %r12, %rcx
addq %rbx, %r11
vpor %ymm11, %ymm10, %ymm10
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r11, %rbx
movq %r8, %rcx
vpxor %ymm10, %ymm8, %ymm8
addq 40(%rsp), %r10
xorq %r9, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r15, %rax
andq %r15, %rcx
vpaddq %ymm1, %ymm13, %ymm1
rorq $4, %rax
xorq %r9, %rcx
vpaddq %ymm1, %ymm8, %ymm1
xorq %r15, %rax
addq %rcx, %r10
vperm2I128 $0x81, %ymm0, %ymm0, %ymm14
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
vpsrlq $19, %ymm14, %ymm8
xorq %r11, %rcx
xorq %r12, %rdx
vpsllq $45, %ymm14, %ymm9
rorq $6, %rcx
addq %r10, %r14
vpsrlq $61, %ymm14, %ymm10
xorq %r11, %rcx
addq %rdx, %r10
vpsllq $3, %ymm14, %ymm11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
vpor %ymm9, %ymm8, %ymm8
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r14, %rax
andq %r14, %rcx
vpxor %ymm10, %ymm8, %ymm8
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $6, %ymm14, %ymm11
xorq %r14, %rax
addq %rcx, %r9
vpxor %ymm11, %ymm8, %ymm8
rorq $14, %rax
xorq %r11, %rdx
vpaddq %ymm1, %ymm8, %ymm1
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vperm2I128 $8, %ymm1, %ymm1, %ymm14
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpsrlq $19, %ymm14, %ymm8
xorq %r10, %rcx
addq %rbx, %r9
vpsllq $45, %ymm14, %ymm9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
vpsrlq $61, %ymm14, %ymm10
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
vpsllq $3, %ymm14, %ymm11
xorq %r13, %rax
andq %r13, %rcx
vpor %ymm9, %ymm8, %ymm8
rorq $4, %rax
xorq %r15, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r13, %rax
addq %rcx, %r8
vpxor %ymm10, %ymm8, %ymm8
rorq $14, %rax
xorq %r10, %rbx
vpsrlq $6, %ymm14, %ymm11
addq %rax, %r8
movq %r9, %rcx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
rorq $5, %rcx
vpaddq %ymm1, %ymm8, %ymm1
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
rorq $23, %rax
vpblendd $3, %ymm3, %ymm2, %ymm12
vpblendd $3, %ymm1, %ymm0, %ymm13
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
vpermq $57, %ymm12, %ymm12
rorq $4, %rax
xorq %r14, %rcx
vpermq $57, %ymm13, %ymm13
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpsrlq $0x01, %ymm12, %ymm8
addq %rax, %r15
movq %r8, %rcx
vpsllq $63, %ymm12, %ymm9
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $8, %ymm12, %ymm10
xorq %r8, %rcx
xorq %r9, %rbx
vpsllq $56, %ymm12, %ymm11
rorq $6, %rcx
addq %r15, %r11
vpor %ymm9, %ymm8, %ymm8
xorq %r8, %rcx
addq %rbx, %r15
vpor %ymm11, %ymm10, %ymm10
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r15, %rbx
movq %r12, %rcx
vpxor %ymm10, %ymm8, %ymm8
addq 72(%rsp), %r14
xorq %r13, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r11, %rax
andq %r11, %rcx
vpaddq %ymm2, %ymm13, %ymm2
rorq $4, %rax
xorq %r13, %rcx
vpaddq %ymm2, %ymm8, %ymm2
xorq %r11, %rax
addq %rcx, %r14
vperm2I128 $0x81, %ymm1, %ymm1, %ymm14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
vpsrlq $19, %ymm14, %ymm8
xorq %r15, %rcx
xorq %r8, %rdx
vpsllq $45, %ymm14, %ymm9
rorq $6, %rcx
addq %r14, %r10
vpsrlq $61, %ymm14, %ymm10
xorq %r15, %rcx
addq %rdx, %r14
vpsllq $3, %ymm14, %ymm11
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
vpor %ymm9, %ymm8, %ymm8
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r10, %rax
andq %r10, %rcx
vpxor %ymm10, %ymm8, %ymm8
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $6, %ymm14, %ymm11
xorq %r10, %rax
addq %rcx, %r13
vpxor %ymm11, %ymm8, %ymm8
rorq $14, %rax
xorq %r15, %rdx
vpaddq %ymm2, %ymm8, %ymm2
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vperm2I128 $8, %ymm2, %ymm2, %ymm14
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpsrlq $19, %ymm14, %ymm8
xorq %r14, %rcx
addq %rbx, %r13
vpsllq $45, %ymm14, %ymm9
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
vpsrlq $61, %ymm14, %ymm10
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
vpsllq $3, %ymm14, %ymm11
xorq %r9, %rax
andq %r9, %rcx
vpor %ymm9, %ymm8, %ymm8
rorq $4, %rax
xorq %r11, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r9, %rax
addq %rcx, %r12
vpxor %ymm10, %ymm8, %ymm8
rorq $14, %rax
xorq %r14, %rbx
vpsrlq $6, %ymm14, %ymm11
addq %rax, %r12
movq %r13, %rcx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
rorq $5, %rcx
vpaddq %ymm2, %ymm8, %ymm2
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
vpblendd $3, %ymm0, %ymm3, %ymm12
vpblendd $3, %ymm2, %ymm1, %ymm13
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
vpermq $57, %ymm12, %ymm12
rorq $4, %rax
xorq %r10, %rcx
vpermq $57, %ymm13, %ymm13
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpsrlq $0x01, %ymm12, %ymm8
addq %rax, %r11
movq %r12, %rcx
vpsllq $63, %ymm12, %ymm9
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $8, %ymm12, %ymm10
xorq %r12, %rcx
xorq %r13, %rbx
vpsllq $56, %ymm12, %ymm11
rorq $6, %rcx
addq %r11, %r15
vpor %ymm9, %ymm8, %ymm8
xorq %r12, %rcx
addq %rbx, %r11
vpor %ymm11, %ymm10, %ymm10
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r11, %rbx
movq %r8, %rcx
vpxor %ymm10, %ymm8, %ymm8
addq 104(%rsp), %r10
xorq %r9, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r15, %rax
andq %r15, %rcx
vpaddq %ymm3, %ymm13, %ymm3
rorq $4, %rax
xorq %r9, %rcx
vpaddq %ymm3, %ymm8, %ymm3
xorq %r15, %rax
addq %rcx, %r10
vperm2I128 $0x81, %ymm2, %ymm2, %ymm14
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
vpsrlq $19, %ymm14, %ymm8
xorq %r11, %rcx
xorq %r12, %rdx
vpsllq $45, %ymm14, %ymm9
rorq $6, %rcx
addq %r10, %r14
vpsrlq $61, %ymm14, %ymm10
xorq %r11, %rcx
addq %rdx, %r10
vpsllq $3, %ymm14, %ymm11
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
vpor %ymm9, %ymm8, %ymm8
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r14, %rax
andq %r14, %rcx
vpxor %ymm10, %ymm8, %ymm8
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $6, %ymm14, %ymm11
xorq %r14, %rax
addq %rcx, %r9
vpxor %ymm11, %ymm8, %ymm8
rorq $14, %rax
xorq %r11, %rdx
vpaddq %ymm3, %ymm8, %ymm3
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vperm2I128 $8, %ymm3, %ymm3, %ymm14
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpsrlq $19, %ymm14, %ymm8
xorq %r10, %rcx
addq %rbx, %r9
vpsllq $45, %ymm14, %ymm9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
vpsrlq $61, %ymm14, %ymm10
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
vpsllq $3, %ymm14, %ymm11
xorq %r13, %rax
andq %r13, %rcx
vpor %ymm9, %ymm8, %ymm8
rorq $4, %rax
xorq %r15, %rcx
vpor %ymm11, %ymm10, %ymm10
xorq %r13, %rax
addq %rcx, %r8
vpxor %ymm10, %ymm8, %ymm8
rorq $14, %rax
xorq %r10, %rbx
vpsrlq $6, %ymm14, %ymm11
addq %rax, %r8
movq %r9, %rcx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
rorq $5, %rcx
vpaddq %ymm3, %ymm8, %ymm3
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq (%rsi), %ymm0, %ymm8
vpaddq 32(%rsi), %ymm1, %ymm9
vmovdqu %ymm8, (%rsp)
vmovdqu %ymm9, 32(%rsp)
vpaddq 64(%rsi), %ymm2, %ymm8
vpaddq 96(%rsi), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rsp)
vmovdqu %ymm9, 96(%rsp)
subl $0x01, 128(%rsp)
jne L_sha512_avx2_start
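# Final 16 rounds: W[t]+K[t] for rounds 64-79 are already on the
# stack, so no message scheduling is interleaved here.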
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq (%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 16(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 24(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 32(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 40(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 48(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 56(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 64(%rsp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 72(%rsp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 80(%rsp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 88(%rsp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 96(%rsp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 104(%rsp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rsp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rsp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
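# Feed-forward: fold the working variables back into the stored digest.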
addq %r8, (%rdi)
addq %r9, 8(%rdi)
addq %r10, 16(%rdi)
addq %r11, 24(%rdi)
addq %r12, 32(%rdi)
addq %r13, 40(%rdi)
addq %r14, 48(%rdi)
addq %r15, 56(%rdi)
xorq %rax, %rax
vzeroupper
addq $0x88, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX2,.-Transform_Sha512_AVX2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX2_Len
.type Transform_Sha512_AVX2_Len,@function
.align 16
Transform_Sha512_AVX2_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX2_Len
.p2align 4
_Transform_Sha512_AVX2_Len:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rsi, %rbp
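# If the byte count contains an odd number of 128-byte blocks (bit 7
# set), hash one block with the single-block routine first so the main
# loop below can consume blocks two at a time.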
testb $0x80, %bpl
je L_sha512_len_avx2_block
movq 224(%rdi), %rcx
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vmovups %ymm0, 64(%rdi)
vmovups %ymm1, 96(%rdi)
vmovups %ymm2, 128(%rdi)
vmovups %ymm3, 160(%rdi)
#ifndef __APPLE__
call Transform_Sha512_AVX2@plt
#else
call _Transform_Sha512_AVX2
#endif /* __APPLE__ */
addq $0x80, 224(%rdi)
subl $0x80, %ebp
jz L_sha512_len_avx2_done
L_sha512_len_avx2_block:
subq $0x548, %rsp
movq 224(%rdi), %rcx
vmovdqa L_avx2_sha512_flip_mask(%rip), %ymm15
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
movq %rbp, 1344(%rsp)
# Start of loop processing two blocks
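# Words of block N are placed in the low 128-bit lane and words of
# block N+1 (at offset 128) in the high lane via vinserti128, so every
# ymm operation schedules both blocks at once against the duplicated
# constant table.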
L_sha512_len_avx2_begin:
movq %rsp, %rbp
leaq L_avx2_sha512_k_2(%rip), %rsi
movq %r9, %rbx
movq %r12, %rax
vmovdqu (%rcx), %xmm0
vmovdqu 16(%rcx), %xmm1
vinserti128 $0x01, 128(%rcx), %ymm0, %ymm0
vinserti128 $0x01, 144(%rcx), %ymm1, %ymm1
vpshufb %ymm15, %ymm0, %ymm0
vpshufb %ymm15, %ymm1, %ymm1
vmovdqu 32(%rcx), %xmm2
vmovdqu 48(%rcx), %xmm3
vinserti128 $0x01, 160(%rcx), %ymm2, %ymm2
vinserti128 $0x01, 176(%rcx), %ymm3, %ymm3
vpshufb %ymm15, %ymm2, %ymm2
vpshufb %ymm15, %ymm3, %ymm3
vmovdqu 64(%rcx), %xmm4
vmovdqu 80(%rcx), %xmm5
vinserti128 $0x01, 192(%rcx), %ymm4, %ymm4
vinserti128 $0x01, 208(%rcx), %ymm5, %ymm5
vpshufb %ymm15, %ymm4, %ymm4
vpshufb %ymm15, %ymm5, %ymm5
vmovdqu 96(%rcx), %xmm6
vmovdqu 112(%rcx), %xmm7
vinserti128 $0x01, 224(%rcx), %ymm6, %ymm6
vinserti128 $0x01, 240(%rcx), %ymm7, %ymm7
vpshufb %ymm15, %ymm6, %ymm6
vpshufb %ymm15, %ymm7, %ymm7
xorq %r10, %rbx
# Start of 16 rounds
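# Each pass stores W[t]+K[t] for 16 rounds of both blocks at (%rbp),
# then schedules the next 16 words while running the scalar rounds;
# %rsi and %rbp advance by 0x100 per pass.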
L_sha512_len_avx2_start:
vpaddq (%rsi), %ymm0, %ymm8
vpaddq 32(%rsi), %ymm1, %ymm9
vmovdqu %ymm8, (%rbp)
vmovdqu %ymm9, 32(%rbp)
vpaddq 64(%rsi), %ymm2, %ymm8
vpaddq 96(%rsi), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rbp)
vmovdqu %ymm9, 96(%rbp)
vpaddq 128(%rsi), %ymm4, %ymm8
vpaddq 160(%rsi), %ymm5, %ymm9
vmovdqu %ymm8, 128(%rbp)
vmovdqu %ymm9, 160(%rbp)
vpaddq 192(%rsi), %ymm6, %ymm8
vpaddq 224(%rsi), %ymm7, %ymm9
vmovdqu %ymm8, 192(%rbp)
vmovdqu %ymm9, 224(%rbp)
# msg_sched: 0-1
rorq $23, %rax
vpalignr $8, %ymm0, %ymm1, %ymm12
vpalignr $8, %ymm4, %ymm5, %ymm13
movq %r8, %rdx
movq %r13, %rcx
addq (%rbp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm0, %ymm13, %ymm0
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
vpaddq %ymm0, %ymm8, %ymm0
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rbp), %r14
xorq %r13, %rcx
vpsrlq $19, %ymm7, %ymm8
vpsllq $45, %ymm7, %ymm9
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %ymm7, %ymm10
vpsllq $3, %ymm7, %ymm11
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm7, %ymm11
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %ymm0, %ymm8, %ymm0
# msg_sched done: 0-3
# msg_sched: 4-5
rorq $23, %rax
vpalignr $8, %ymm1, %ymm2, %ymm12
vpalignr $8, %ymm5, %ymm6, %ymm13
movq %r14, %rdx
movq %r11, %rcx
addq 32(%rbp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm1, %ymm13, %ymm1
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
vpaddq %ymm1, %ymm8, %ymm1
movq %r13, %rbx
movq %r10, %rcx
addq 40(%rbp), %r12
xorq %r11, %rcx
vpsrlq $19, %ymm0, %ymm8
vpsllq $45, %ymm0, %ymm9
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %ymm0, %ymm10
vpsllq $3, %ymm0, %ymm11
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm0, %ymm11
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %ymm1, %ymm8, %ymm1
# msg_sched done: 4-7
# msg_sched: 8-9
rorq $23, %rax
vpalignr $8, %ymm2, %ymm3, %ymm12
vpalignr $8, %ymm6, %ymm7, %ymm13
movq %r12, %rdx
movq %r9, %rcx
addq 64(%rbp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm2, %ymm13, %ymm2
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
vpaddq %ymm2, %ymm8, %ymm2
movq %r11, %rbx
movq %r8, %rcx
addq 72(%rbp), %r10
xorq %r9, %rcx
vpsrlq $19, %ymm1, %ymm8
vpsllq $45, %ymm1, %ymm9
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %ymm1, %ymm10
vpsllq $3, %ymm1, %ymm11
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm1, %ymm11
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %ymm2, %ymm8, %ymm2
# msg_sched done: 8-11
# msg_sched: 12-13
rorq $23, %rax
vpalignr $8, %ymm3, %ymm4, %ymm12
vpalignr $8, %ymm7, %ymm0, %ymm13
movq %r10, %rdx
movq %r15, %rcx
addq 96(%rbp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm3, %ymm13, %ymm3
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
vpaddq %ymm3, %ymm8, %ymm3
movq %r9, %rbx
movq %r14, %rcx
addq 104(%rbp), %r8
xorq %r15, %rcx
vpsrlq $19, %ymm2, %ymm8
vpsllq $45, %ymm2, %ymm9
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %ymm2, %ymm10
vpsllq $3, %ymm2, %ymm11
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm2, %ymm11
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %ymm3, %ymm8, %ymm3
# msg_sched done: 12-15
# msg_sched: 16-17
rorq $23, %rax
vpalignr $8, %ymm4, %ymm5, %ymm12
vpalignr $8, %ymm0, %ymm1, %ymm13
movq %r8, %rdx
movq %r13, %rcx
addq 128(%rbp), %r15
xorq %r14, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm4, %ymm13, %ymm4
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
vpaddq %ymm4, %ymm8, %ymm4
movq %r15, %rbx
movq %r12, %rcx
addq 136(%rbp), %r14
xorq %r13, %rcx
vpsrlq $19, %ymm3, %ymm8
vpsllq $45, %ymm3, %ymm9
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
vpsrlq $61, %ymm3, %ymm10
vpsllq $3, %ymm3, %ymm11
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm3, %ymm11
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
vpaddq %ymm4, %ymm8, %ymm4
# msg_sched done: 16-19
# msg_sched: 20-21
rorq $23, %rax
vpalignr $8, %ymm5, %ymm6, %ymm12
vpalignr $8, %ymm1, %ymm2, %ymm13
movq %r14, %rdx
movq %r11, %rcx
addq 160(%rbp), %r13
xorq %r12, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm5, %ymm13, %ymm5
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
vpaddq %ymm5, %ymm8, %ymm5
movq %r13, %rbx
movq %r10, %rcx
addq 168(%rbp), %r12
xorq %r11, %rcx
vpsrlq $19, %ymm4, %ymm8
vpsllq $45, %ymm4, %ymm9
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
vpsrlq $61, %ymm4, %ymm10
vpsllq $3, %ymm4, %ymm11
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm4, %ymm11
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
vpaddq %ymm5, %ymm8, %ymm5
# msg_sched done: 20-23
# msg_sched: 24-25
rorq $23, %rax
vpalignr $8, %ymm6, %ymm7, %ymm12
vpalignr $8, %ymm2, %ymm3, %ymm13
movq %r12, %rdx
movq %r9, %rcx
addq 192(%rbp), %r11
xorq %r10, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm6, %ymm13, %ymm6
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
vpaddq %ymm6, %ymm8, %ymm6
movq %r11, %rbx
movq %r8, %rcx
addq 200(%rbp), %r10
xorq %r9, %rcx
vpsrlq $19, %ymm5, %ymm8
vpsllq $45, %ymm5, %ymm9
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
vpsrlq $61, %ymm5, %ymm10
vpsllq $3, %ymm5, %ymm11
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm5, %ymm11
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
vpaddq %ymm6, %ymm8, %ymm6
# msg_sched done: 24-27
# msg_sched: 28-29
rorq $23, %rax
vpalignr $8, %ymm7, %ymm0, %ymm12
vpalignr $8, %ymm3, %ymm4, %ymm13
movq %r10, %rdx
movq %r15, %rcx
addq 224(%rbp), %r9
xorq %r8, %rcx
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm7, %ymm13, %ymm7
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
vpaddq %ymm7, %ymm8, %ymm7
movq %r9, %rbx
movq %r14, %rcx
addq 232(%rbp), %r8
xorq %r15, %rcx
vpsrlq $19, %ymm6, %ymm8
vpsllq $45, %ymm6, %ymm9
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
vpsrlq $61, %ymm6, %ymm10
vpsllq $3, %ymm6, %ymm11
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm6, %ymm11
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
vpxor %ymm11, %ymm8, %ymm8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
vpaddq %ymm7, %ymm8, %ymm7
# msg_sched done: 28-31
addq $0x100, %rsi
addq $0x100, %rbp
cmpq L_avx2_sha512_k_2_end(%rip), %rsi
jne L_sha512_len_avx2_start
vpaddq (%rsi), %ymm0, %ymm8
vpaddq 32(%rsi), %ymm1, %ymm9
vmovdqu %ymm8, (%rbp)
vmovdqu %ymm9, 32(%rbp)
vpaddq 64(%rsi), %ymm2, %ymm8
vpaddq 96(%rsi), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rbp)
vmovdqu %ymm9, 96(%rbp)
vpaddq 128(%rsi), %ymm4, %ymm8
vpaddq 160(%rsi), %ymm5, %ymm9
vmovdqu %ymm8, 128(%rbp)
vmovdqu %ymm9, 160(%rbp)
vpaddq 192(%rsi), %ymm6, %ymm8
vpaddq 224(%rsi), %ymm7, %ymm9
vmovdqu %ymm8, 192(%rbp)
vmovdqu %ymm9, 224(%rbp)
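# Final 16 rounds (64-79) of the first block, scalar only. The low-lane
# W+K entries sit at offsets 0,8,32,40,...: each 32-byte store keeps
# block 1 in its low 16 bytes and block 2 in its high 16 bytes.
# The rorq chains compute Sigma1(e) = ROR14(ROR4(ROR23(e)^e)^e)
# = ROR14(e)^ROR18(e)^ROR41(e), and Sigma0(a) likewise with 28/34/39.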
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq (%rbp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 8(%rbp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 32(%rbp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 40(%rbp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 64(%rbp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 72(%rbp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 96(%rbp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 104(%rbp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 128(%rbp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 136(%rbp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 160(%rbp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 168(%rbp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 192(%rbp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 200(%rbp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 224(%rbp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 232(%rbp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
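# First block of the pair is complete: rewind the W+K buffer to its
# start and fold the working registers into the hash state before
# running the second block's rounds from the same buffer.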
subq $0x400, %rbp
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
movq %r9, %rbx
movq %r12, %rax
xorq %r10, %rbx
movq $5, %rsi
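# Second block: 5 passes of 16 rounds each (80 rounds total), consuming
# the high-lane W+K entries at offsets 16,24,48,56,... of the buffer.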
L_sha512_len_avx2_tail:
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 16(%rbp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 24(%rbp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 48(%rbp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 56(%rbp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 80(%rbp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 88(%rbp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 112(%rbp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 120(%rbp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
rorq $23, %rax
movq %r8, %rdx
movq %r13, %rcx
addq 144(%rbp), %r15
xorq %r14, %rcx
xorq %r12, %rax
andq %r12, %rcx
rorq $4, %rax
xorq %r14, %rcx
xorq %r12, %rax
addq %rcx, %r15
rorq $14, %rax
xorq %r9, %rdx
addq %rax, %r15
movq %r8, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r8, %rcx
xorq %r9, %rbx
rorq $6, %rcx
addq %r15, %r11
xorq %r8, %rcx
addq %rbx, %r15
rorq $28, %rcx
movq %r11, %rax
addq %rcx, %r15
rorq $23, %rax
movq %r15, %rbx
movq %r12, %rcx
addq 152(%rbp), %r14
xorq %r13, %rcx
xorq %r11, %rax
andq %r11, %rcx
rorq $4, %rax
xorq %r13, %rcx
xorq %r11, %rax
addq %rcx, %r14
rorq $14, %rax
xorq %r8, %rbx
addq %rax, %r14
movq %r15, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r15, %rcx
xorq %r8, %rdx
rorq $6, %rcx
addq %r14, %r10
xorq %r15, %rcx
addq %rdx, %r14
rorq $28, %rcx
movq %r10, %rax
addq %rcx, %r14
rorq $23, %rax
movq %r14, %rdx
movq %r11, %rcx
addq 176(%rbp), %r13
xorq %r12, %rcx
xorq %r10, %rax
andq %r10, %rcx
rorq $4, %rax
xorq %r12, %rcx
xorq %r10, %rax
addq %rcx, %r13
rorq $14, %rax
xorq %r15, %rdx
addq %rax, %r13
movq %r14, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r14, %rcx
xorq %r15, %rbx
rorq $6, %rcx
addq %r13, %r9
xorq %r14, %rcx
addq %rbx, %r13
rorq $28, %rcx
movq %r9, %rax
addq %rcx, %r13
rorq $23, %rax
movq %r13, %rbx
movq %r10, %rcx
addq 184(%rbp), %r12
xorq %r11, %rcx
xorq %r9, %rax
andq %r9, %rcx
rorq $4, %rax
xorq %r11, %rcx
xorq %r9, %rax
addq %rcx, %r12
rorq $14, %rax
xorq %r14, %rbx
addq %rax, %r12
movq %r13, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r13, %rcx
xorq %r14, %rdx
rorq $6, %rcx
addq %r12, %r8
xorq %r13, %rcx
addq %rdx, %r12
rorq $28, %rcx
movq %r8, %rax
addq %rcx, %r12
rorq $23, %rax
movq %r12, %rdx
movq %r9, %rcx
addq 208(%rbp), %r11
xorq %r10, %rcx
xorq %r8, %rax
andq %r8, %rcx
rorq $4, %rax
xorq %r10, %rcx
xorq %r8, %rax
addq %rcx, %r11
rorq $14, %rax
xorq %r13, %rdx
addq %rax, %r11
movq %r12, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r12, %rcx
xorq %r13, %rbx
rorq $6, %rcx
addq %r11, %r15
xorq %r12, %rcx
addq %rbx, %r11
rorq $28, %rcx
movq %r15, %rax
addq %rcx, %r11
rorq $23, %rax
movq %r11, %rbx
movq %r8, %rcx
addq 216(%rbp), %r10
xorq %r9, %rcx
xorq %r15, %rax
andq %r15, %rcx
rorq $4, %rax
xorq %r9, %rcx
xorq %r15, %rax
addq %rcx, %r10
rorq $14, %rax
xorq %r12, %rbx
addq %rax, %r10
movq %r11, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r11, %rcx
xorq %r12, %rdx
rorq $6, %rcx
addq %r10, %r14
xorq %r11, %rcx
addq %rdx, %r10
rorq $28, %rcx
movq %r14, %rax
addq %rcx, %r10
rorq $23, %rax
movq %r10, %rdx
movq %r15, %rcx
addq 240(%rbp), %r9
xorq %r8, %rcx
xorq %r14, %rax
andq %r14, %rcx
rorq $4, %rax
xorq %r8, %rcx
xorq %r14, %rax
addq %rcx, %r9
rorq $14, %rax
xorq %r11, %rdx
addq %rax, %r9
movq %r10, %rcx
andq %rdx, %rbx
rorq $5, %rcx
xorq %r10, %rcx
xorq %r11, %rbx
rorq $6, %rcx
addq %r9, %r13
xorq %r10, %rcx
addq %rbx, %r9
rorq $28, %rcx
movq %r13, %rax
addq %rcx, %r9
rorq $23, %rax
movq %r9, %rbx
movq %r14, %rcx
addq 248(%rbp), %r8
xorq %r15, %rcx
xorq %r13, %rax
andq %r13, %rcx
rorq $4, %rax
xorq %r15, %rcx
xorq %r13, %rax
addq %rcx, %r8
rorq $14, %rax
xorq %r10, %rbx
addq %rax, %r8
movq %r9, %rcx
andq %rbx, %rdx
rorq $5, %rcx
xorq %r9, %rcx
xorq %r10, %rdx
rorq $6, %rcx
addq %r8, %r12
xorq %r9, %rcx
addq %rdx, %r8
rorq $28, %rcx
movq %r12, %rax
addq %rcx, %r8
addq $0x100, %rbp
subq $0x01, %rsi
jnz L_sha512_len_avx2_tail
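# Both blocks are done: update the state, advance the data pointer at
# 224(%rdi) by 0x100 (two 128-byte blocks), and decrement the remaining
# length at 1344(%rsp); loop while bytes remain.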
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
movq 224(%rdi), %rcx
addq $0x100, %rcx
subl $0x100, 1344(%rsp)
movq %rcx, 224(%rdi)
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
jnz L_sha512_len_avx2_begin
addq $0x548, %rsp
L_sha512_len_avx2_done:
xorq %rax, %rax
vzeroupper
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX2_Len,.-Transform_Sha512_AVX2_Len
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
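# SHA-512 round constants K[0..79] for the RORX implementation.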
L_avx2_rorx_sha512_k:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
        .quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
        .quad	0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        .quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
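# The same constants with every 16-byte pair duplicated, so a single
# 32-byte load feeds identical K values to both ymm lanes when two
# blocks are processed at once.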
L_avx2_rorx_sha512_k_2:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
        .quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        .quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
        .quad	0x06ca6351e003826f,0x142929670a0e6e70
        .quad	0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        .quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
        .quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 8
#else
.p2align 3
#endif /* __APPLE__ */
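# Precomputed address of L_avx2_rorx_sha512_k_2 + 1024: the constants
# for the last 16 rounds, used as the schedule loop's end marker.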
L_avx2_rorx_sha512_k_2_end:
.quad 1024+L_avx2_rorx_sha512_k_2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
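# vpshufb mask that reverses the bytes of each 64-bit word, converting
# the big-endian message words on load.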
L_avx2_rorx_sha512_flip_mask:
        .quad	0x0001020304050607, 0x08090a0b0c0d0e0f
        .quad	0x0001020304050607, 0x08090a0b0c0d0e0f
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX2_RORX
.type Transform_Sha512_AVX2_RORX,@function
.align 16
Transform_Sha512_AVX2_RORX:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX2_RORX
.p2align 4
_Transform_Sha512_AVX2_RORX:
#endif /* __APPLE__ */
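# Single-block SHA-512 transform using BMI2 rorx for the rotations.
# The state is read from (%rdi) and the 128-byte block from 64(%rdi).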
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x88, %rsp
leaq 64(%rdi), %rcx
vmovdqa L_avx2_rorx_sha512_flip_mask(%rip), %ymm15
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
vmovdqu (%rcx), %ymm0
vmovdqu 32(%rcx), %ymm1
vpshufb %ymm15, %ymm0, %ymm0
vpshufb %ymm15, %ymm1, %ymm1
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpshufb %ymm15, %ymm2, %ymm2
vpshufb %ymm15, %ymm3, %ymm3
movl $4, 128(%rsp)
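# 128(%rsp) counts four 16-round passes (rounds 0-63); each pass also
# stores the next pass's W+K, so rounds 64-79 can run from the stack.
# (Note: the loop label below keeps a legacy sha256 name.)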
leaq L_avx2_rorx_sha512_k(%rip), %rsi
movq %r9, %rbx
xorq %rdx, %rdx
xorq %r10, %rbx
# set_w_k: 0
vpaddq (%rsi), %ymm0, %ymm8
vpaddq 32(%rsi), %ymm1, %ymm9
vmovdqu %ymm8, (%rsp)
vmovdqu %ymm9, 32(%rsp)
vpaddq 64(%rsi), %ymm2, %ymm8
vpaddq 96(%rsi), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rsp)
vmovdqu %ymm9, 96(%rsp)
# Start of 16 rounds
L_sha256_len_avx2_rorx_start:
addq $0x80, %rsi
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpblendd $3, %ymm1, %ymm0, %ymm12
vpblendd $3, %ymm3, %ymm2, %ymm13
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpermq $57, %ymm12, %ymm12
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpermq $57, %ymm13, %ymm13
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
vperm2I128 $0x81, %ymm3, %ymm3, %ymm14
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpxor %ymm10, %ymm8, %ymm8
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpaddq %ymm0, %ymm13, %ymm0
vpaddq %ymm0, %ymm8, %ymm0
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
vpor %ymm11, %ymm10, %ymm10
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
vpxor %ymm10, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpsrlq $6, %ymm14, %ymm11
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpxor %ymm11, %ymm8, %ymm8
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpaddq %ymm0, %ymm8, %ymm0
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vperm2I128 $8, %ymm0, %ymm0, %ymm14
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
vpor %ymm11, %ymm10, %ymm10
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpxor %ymm10, %ymm8, %ymm8
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $6, %ymm14, %ymm11
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpxor %ymm11, %ymm8, %ymm8
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpaddq %ymm0, %ymm8, %ymm0
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
vpaddq (%rsi), %ymm0, %ymm8
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vmovdqu %ymm8, (%rsp)
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpblendd $3, %ymm2, %ymm1, %ymm12
vpblendd $3, %ymm0, %ymm3, %ymm13
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpermq $57, %ymm12, %ymm12
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpermq $57, %ymm13, %ymm13
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
vperm2I128 $0x81, %ymm0, %ymm0, %ymm14
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpxor %ymm10, %ymm8, %ymm8
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpaddq %ymm1, %ymm13, %ymm1
vpaddq %ymm1, %ymm8, %ymm1
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
vpor %ymm11, %ymm10, %ymm10
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
vpxor %ymm10, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpsrlq $6, %ymm14, %ymm11
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpxor %ymm11, %ymm8, %ymm8
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpaddq %ymm1, %ymm8, %ymm1
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vperm2I128 $8, %ymm1, %ymm1, %ymm14
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
vpor %ymm11, %ymm10, %ymm10
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpxor %ymm10, %ymm8, %ymm8
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $6, %ymm14, %ymm11
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpxor %ymm11, %ymm8, %ymm8
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpaddq %ymm1, %ymm8, %ymm1
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
vpaddq 32(%rsi), %ymm1, %ymm8
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vmovdqu %ymm8, 32(%rsp)
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpblendd $3, %ymm3, %ymm2, %ymm12
vpblendd $3, %ymm1, %ymm0, %ymm13
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpermq $57, %ymm12, %ymm12
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpermq $57, %ymm13, %ymm13
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
vperm2I128 $0x81, %ymm1, %ymm1, %ymm14
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpxor %ymm10, %ymm8, %ymm8
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpaddq %ymm2, %ymm13, %ymm2
vpaddq %ymm2, %ymm8, %ymm2
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
vpor %ymm11, %ymm10, %ymm10
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
vpxor %ymm10, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpsrlq $6, %ymm14, %ymm11
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpxor %ymm11, %ymm8, %ymm8
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpaddq %ymm2, %ymm8, %ymm2
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vperm2I128 $8, %ymm2, %ymm2, %ymm14
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
vpor %ymm11, %ymm10, %ymm10
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpxor %ymm10, %ymm8, %ymm8
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $6, %ymm14, %ymm11
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpxor %ymm11, %ymm8, %ymm8
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpaddq %ymm2, %ymm8, %ymm2
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
vpaddq 64(%rsi), %ymm2, %ymm8
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vmovdqu %ymm8, 64(%rsp)
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpblendd $3, %ymm0, %ymm3, %ymm12
vpblendd $3, %ymm2, %ymm1, %ymm13
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpermq $57, %ymm12, %ymm12
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpermq $57, %ymm13, %ymm13
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
vperm2I128 $0x81, %ymm2, %ymm2, %ymm14
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpxor %ymm10, %ymm8, %ymm8
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpxor %ymm11, %ymm8, %ymm8
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpaddq %ymm3, %ymm13, %ymm3
vpaddq %ymm3, %ymm8, %ymm3
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
vpor %ymm11, %ymm10, %ymm10
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
vpxor %ymm10, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpsrlq $6, %ymm14, %ymm11
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpxor %ymm11, %ymm8, %ymm8
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpaddq %ymm3, %ymm8, %ymm3
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vperm2I128 $8, %ymm3, %ymm3, %ymm14
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpsrlq $19, %ymm14, %ymm8
vpsllq $45, %ymm14, %ymm9
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpsrlq $61, %ymm14, %ymm10
vpsllq $3, %ymm14, %ymm11
vpor %ymm9, %ymm8, %ymm8
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
vpor %ymm11, %ymm10, %ymm10
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpxor %ymm10, %ymm8, %ymm8
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $6, %ymm14, %ymm11
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpxor %ymm11, %ymm8, %ymm8
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpaddq %ymm3, %ymm8, %ymm3
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
vpaddq 96(%rsi), %ymm3, %ymm8
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vmovdqu %ymm8, 96(%rsp)
subl $0x01, 128(%rsp)
jne L_sha256_len_avx2_rorx_start
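# Rounds 64-79: the message schedule is complete, so these groups of
# four rounds consume the stacked W+K with no vector work interleaved.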
# rnd_all_4: 0-3
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq (%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 8(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 16(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 24(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_4: 4-7
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 32(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 40(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 48(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 56(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
# rnd_all_4: 8-11
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 64(%rsp), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 72(%rsp), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 80(%rsp), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 88(%rsp), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_4: 12-15
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 96(%rsp), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 104(%rsp), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 112(%rsp), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 120(%rsp), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
addq %rdx, %r8
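# Fold the working variables back into the hash state.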
addq %r8, (%rdi)
addq %r9, 8(%rdi)
addq %r10, 16(%rdi)
addq %r11, 24(%rdi)
addq %r12, 32(%rdi)
addq %r13, 40(%rdi)
addq %r14, 48(%rdi)
addq %r15, 56(%rdi)
xorq %rax, %rax
vzeroupper
addq $0x88, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX2_RORX,.-Transform_Sha512_AVX2_RORX
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha512_AVX2_RORX_Len
.type Transform_Sha512_AVX2_RORX_Len,@function
.align 16
Transform_Sha512_AVX2_RORX_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha512_AVX2_RORX_Len
.p2align 4
_Transform_Sha512_AVX2_RORX_Len:
#endif /* __APPLE__ */
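# Multi-block SHA-512 transform, rorx variant: %rdi is the hash
# context and %esi the byte count (a multiple of the 128-byte block
# size). After an optional odd leading block, blocks are processed two
# at a time, interleaved across the ymm lanes.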
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
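# When the block count is odd (bit 7 of the length), copy one block
# into the context buffer and run the single-block transform first, so
# the two-block loop below always sees an even number of blocks.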
testb $0x80, %sil
je L_sha512_len_avx2_rorx_block
movq 224(%rdi), %rax
push %rsi
vmovdqu (%rax), %ymm0
vmovdqu 32(%rax), %ymm1
vmovdqu 64(%rax), %ymm2
vmovdqu 96(%rax), %ymm3
vmovups %ymm0, 64(%rdi)
vmovups %ymm1, 96(%rdi)
vmovups %ymm2, 128(%rdi)
vmovups %ymm3, 160(%rdi)
#ifndef __APPLE__
call Transform_Sha512_AVX2_RORX@plt
#else
call _Transform_Sha512_AVX2_RORX
#endif /* __APPLE__ */
pop %rsi
addq $0x80, 224(%rdi)
subl $0x80, %esi
jz L_sha512_len_avx2_rorx_done
L_sha512_len_avx2_rorx_block:
subq $0x548, %rsp
movq 224(%rdi), %rax
vmovdqa L_avx2_rorx_sha512_flip_mask(%rip), %ymm15
movq (%rdi), %r8
movq 8(%rdi), %r9
movq 16(%rdi), %r10
movq 24(%rdi), %r11
movq 32(%rdi), %r12
movq 40(%rdi), %r13
movq 48(%rdi), %r14
movq 56(%rdi), %r15
movl %esi, 1344(%rsp)
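# The remaining length is kept at 1344(%rsp); each pass of the loop
# below consumes 0x100 bytes (two blocks).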
# Start of loop processing two blocks
L_sha512_len_avx2_rorx_begin:
movq %rsp, %rsi
leaq L_avx2_rorx_sha512_k_2(%rip), %rbp
movq %r9, %rbx
xorq %rdx, %rdx
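# Interleaved load: each xmm load takes two words of block 1 and
# vinserti128 places the matching words of block 2 (offset 128) in the
# upper lane; vpshufb then byte-swaps every 64-bit word.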
vmovdqu (%rax), %xmm0
vmovdqu 16(%rax), %xmm1
vinserti128 $0x01, 128(%rax), %ymm0, %ymm0
vinserti128 $0x01, 144(%rax), %ymm1, %ymm1
vpshufb %ymm15, %ymm0, %ymm0
vpshufb %ymm15, %ymm1, %ymm1
vmovdqu 32(%rax), %xmm2
vmovdqu 48(%rax), %xmm3
vinserti128 $0x01, 160(%rax), %ymm2, %ymm2
vinserti128 $0x01, 176(%rax), %ymm3, %ymm3
vpshufb %ymm15, %ymm2, %ymm2
vpshufb %ymm15, %ymm3, %ymm3
vmovdqu 64(%rax), %xmm4
vmovdqu 80(%rax), %xmm5
vinserti128 $0x01, 192(%rax), %ymm4, %ymm4
vinserti128 $0x01, 208(%rax), %ymm5, %ymm5
vpshufb %ymm15, %ymm4, %ymm4
vpshufb %ymm15, %ymm5, %ymm5
vmovdqu 96(%rax), %xmm6
vmovdqu 112(%rax), %xmm7
vinserti128 $0x01, 224(%rax), %ymm6, %ymm6
vinserti128 $0x01, 240(%rax), %ymm7, %ymm7
vpshufb %ymm15, %ymm6, %ymm6
vpshufb %ymm15, %ymm7, %ymm7
xorq %r10, %rbx
# Start of 16 rounds
L_sha512_len_avx2_rorx_start:
vpaddq (%rbp), %ymm0, %ymm8
vpaddq 32(%rbp), %ymm1, %ymm9
vmovdqu %ymm8, (%rsi)
vmovdqu %ymm9, 32(%rsi)
vpaddq 64(%rbp), %ymm2, %ymm8
vpaddq 96(%rbp), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rsi)
vmovdqu %ymm9, 96(%rsi)
vpaddq 128(%rbp), %ymm4, %ymm8
vpaddq 160(%rbp), %ymm5, %ymm9
vmovdqu %ymm8, 128(%rsi)
vmovdqu %ymm9, 160(%rsi)
vpaddq 192(%rbp), %ymm6, %ymm8
vpaddq 224(%rbp), %ymm7, %ymm9
vmovdqu %ymm8, 192(%rsi)
vmovdqu %ymm9, 224(%rsi)
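# W+K for the current 16 rounds of both blocks is on the stack; the
# scalar rounds below use the low-lane (block 1) entries while the
# vector unit schedules the next 16 message words.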
# msg_sched: 0-1
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %ymm0, %ymm1, %ymm12
addq (%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm4, %ymm5, %ymm13
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm0, %ymm13, %ymm0
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %ymm0, %ymm8, %ymm0
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %ymm7, %ymm8
vpsllq $45, %ymm7, %ymm9
addq 8(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm7, %ymm10
vpsllq $3, %ymm7, %ymm11
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm7, %ymm11
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %ymm0, %ymm8, %ymm0
# msg_sched done: 0-3
# msg_sched: 4-5
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %ymm1, %ymm2, %ymm12
addq 32(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm5, %ymm6, %ymm13
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm1, %ymm13, %ymm1
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %ymm1, %ymm8, %ymm1
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %ymm0, %ymm8
vpsllq $45, %ymm0, %ymm9
addq 40(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm0, %ymm10
vpsllq $3, %ymm0, %ymm11
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm0, %ymm11
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %ymm1, %ymm8, %ymm1
# msg_sched done: 4-7
# msg_sched: 8-9
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %ymm2, %ymm3, %ymm12
addq 64(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm6, %ymm7, %ymm13
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm2, %ymm13, %ymm2
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %ymm2, %ymm8, %ymm2
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %ymm1, %ymm8
vpsllq $45, %ymm1, %ymm9
addq 72(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm1, %ymm10
vpsllq $3, %ymm1, %ymm11
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm1, %ymm11
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %ymm2, %ymm8, %ymm2
# msg_sched done: 8-11
# msg_sched: 12-13
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %ymm3, %ymm4, %ymm12
addq 96(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm7, %ymm0, %ymm13
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm3, %ymm13, %ymm3
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %ymm3, %ymm8, %ymm3
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %ymm2, %ymm8
vpsllq $45, %ymm2, %ymm9
addq 104(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm2, %ymm10
vpsllq $3, %ymm2, %ymm11
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm2, %ymm11
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %ymm3, %ymm8, %ymm3
# msg_sched done: 12-15
# msg_sched: 16-17
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
vpalignr $8, %ymm4, %ymm5, %ymm12
addq 128(%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm0, %ymm1, %ymm13
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm4, %ymm13, %ymm4
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
vpaddq %ymm4, %ymm8, %ymm4
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
vpsrlq $19, %ymm3, %ymm8
vpsllq $45, %ymm3, %ymm9
addq 136(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm3, %ymm10
vpsllq $3, %ymm3, %ymm11
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm3, %ymm11
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
vpaddq %ymm4, %ymm8, %ymm4
# msg_sched done: 16-19
# msg_sched: 20-21
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
vpalignr $8, %ymm5, %ymm6, %ymm12
addq 160(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm1, %ymm2, %ymm13
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm5, %ymm13, %ymm5
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
vpaddq %ymm5, %ymm8, %ymm5
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
vpsrlq $19, %ymm4, %ymm8
vpsllq $45, %ymm4, %ymm9
addq 168(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm4, %ymm10
vpsllq $3, %ymm4, %ymm11
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm4, %ymm11
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
vpaddq %ymm5, %ymm8, %ymm5
# msg_sched done: 20-23
# msg_sched: 24-25
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
vpalignr $8, %ymm6, %ymm7, %ymm12
addq 192(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm2, %ymm3, %ymm13
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm6, %ymm13, %ymm6
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
vpaddq %ymm6, %ymm8, %ymm6
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
vpsrlq $19, %ymm5, %ymm8
vpsllq $45, %ymm5, %ymm9
addq 200(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm5, %ymm10
vpsllq $3, %ymm5, %ymm11
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm5, %ymm11
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
vpaddq %ymm6, %ymm8, %ymm6
# msg_sched done: 24-27
# msg_sched: 28-29
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
vpalignr $8, %ymm7, %ymm0, %ymm12
addq 224(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
vpalignr $8, %ymm3, %ymm4, %ymm13
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
vpsrlq $0x01, %ymm12, %ymm8
vpsllq $63, %ymm12, %ymm9
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
vpsrlq $8, %ymm12, %ymm10
vpsllq $56, %ymm12, %ymm11
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
vpsrlq $7, %ymm12, %ymm11
vpxor %ymm10, %ymm8, %ymm8
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
vpxor %ymm11, %ymm8, %ymm8
vpaddq %ymm7, %ymm13, %ymm7
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
vpaddq %ymm7, %ymm8, %ymm7
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
vpsrlq $19, %ymm6, %ymm8
vpsllq $45, %ymm6, %ymm9
addq 232(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
vpsrlq $61, %ymm6, %ymm10
vpsllq $3, %ymm6, %ymm11
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
vpor %ymm9, %ymm8, %ymm8
vpor %ymm11, %ymm10, %ymm10
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
vpxor %ymm10, %ymm8, %ymm8
vpsrlq $6, %ymm6, %ymm11
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
vpxor %ymm11, %ymm8, %ymm8
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
vpaddq %ymm7, %ymm8, %ymm7
# msg_sched done: 28-31
addq $0x100, %rbp
addq $0x100, %rsi
cmpq L_avx2_rorx_sha512_k_2_end(%rip), %rbp
jne L_sha512_len_avx2_rorx_start
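# Schedule complete: store W+K for rounds 64-79, then finish block 1
# with the scalar-only rnd_all_2 groups below.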
vpaddq (%rbp), %ymm0, %ymm8
vpaddq 32(%rbp), %ymm1, %ymm9
vmovdqu %ymm8, (%rsi)
vmovdqu %ymm9, 32(%rsi)
vpaddq 64(%rbp), %ymm2, %ymm8
vpaddq 96(%rbp), %ymm3, %ymm9
vmovdqu %ymm8, 64(%rsi)
vmovdqu %ymm9, 96(%rsi)
vpaddq 128(%rbp), %ymm4, %ymm8
vpaddq 160(%rbp), %ymm5, %ymm9
vmovdqu %ymm8, 128(%rsi)
vmovdqu %ymm9, 160(%rsi)
vpaddq 192(%rbp), %ymm6, %ymm8
vpaddq 224(%rbp), %ymm7, %ymm9
vmovdqu %ymm8, 192(%rsi)
vmovdqu %ymm9, 224(%rsi)
# rnd_all_2: 0-1
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq (%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 8(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 4-5
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 32(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 40(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 8-9
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 64(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 72(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 12-13
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 96(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 104(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
# rnd_all_2: 16-17
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 128(%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 136(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 20-21
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 160(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 168(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 24-25
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 192(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 200(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 28-29
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 224(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 232(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
addq %rdx, %r8
subq $0x400, %rsi
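        # feed-forward: add the previous digest at (%rdi) to the working
        # variables and write the updated hash state back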
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
movq %r9, %rbx
xorq %rdx, %rdx
xorq %r10, %rbx
movq $5, %rbp
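        # tail loop: 5 iterations of 16 rounds (80 rounds total) run the
        # second interleaved block from the W+K values spilled at (%rsi)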
L_sha512_len_avx2_rorx_tail:
# rnd_all_2: 2-3
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 16(%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 24(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 6-7
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 48(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 56(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 10-11
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 80(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 88(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 14-15
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 112(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 120(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
# rnd_all_2: 18-19
rorxq $14, %r12, %rax
rorxq $18, %r12, %rcx
addq %rdx, %r8
addq 144(%rsi), %r15
movq %r13, %rdx
xorq %rax, %rcx
xorq %r14, %rdx
rorxq $41, %r12, %rax
xorq %rcx, %rax
andq %r12, %rdx
addq %rax, %r15
rorxq $28, %r8, %rax
rorxq $34, %r8, %rcx
xorq %r14, %rdx
xorq %rax, %rcx
rorxq $39, %r8, %rax
addq %rdx, %r15
xorq %rcx, %rax
movq %r9, %rdx
addq %r15, %r11
xorq %r8, %rdx
andq %rdx, %rbx
addq %rax, %r15
xorq %r9, %rbx
rorxq $14, %r11, %rax
rorxq $18, %r11, %rcx
addq %rbx, %r15
addq 152(%rsi), %r14
movq %r12, %rbx
xorq %rax, %rcx
xorq %r13, %rbx
rorxq $41, %r11, %rax
xorq %rcx, %rax
andq %r11, %rbx
addq %rax, %r14
rorxq $28, %r15, %rax
rorxq $34, %r15, %rcx
xorq %r13, %rbx
xorq %rax, %rcx
rorxq $39, %r15, %rax
addq %rbx, %r14
xorq %rcx, %rax
movq %r8, %rbx
leaq (%r10,%r14,1), %r10
xorq %r15, %rbx
andq %rbx, %rdx
addq %rax, %r14
xorq %r8, %rdx
# rnd_all_2: 22-23
rorxq $14, %r10, %rax
rorxq $18, %r10, %rcx
addq %rdx, %r14
addq 176(%rsi), %r13
movq %r11, %rdx
xorq %rax, %rcx
xorq %r12, %rdx
rorxq $41, %r10, %rax
xorq %rcx, %rax
andq %r10, %rdx
addq %rax, %r13
rorxq $28, %r14, %rax
rorxq $34, %r14, %rcx
xorq %r12, %rdx
xorq %rax, %rcx
rorxq $39, %r14, %rax
addq %rdx, %r13
xorq %rcx, %rax
movq %r15, %rdx
addq %r13, %r9
xorq %r14, %rdx
andq %rdx, %rbx
addq %rax, %r13
xorq %r15, %rbx
rorxq $14, %r9, %rax
rorxq $18, %r9, %rcx
addq %rbx, %r13
addq 184(%rsi), %r12
movq %r10, %rbx
xorq %rax, %rcx
xorq %r11, %rbx
rorxq $41, %r9, %rax
xorq %rcx, %rax
andq %r9, %rbx
addq %rax, %r12
rorxq $28, %r13, %rax
rorxq $34, %r13, %rcx
xorq %r11, %rbx
xorq %rax, %rcx
rorxq $39, %r13, %rax
addq %rbx, %r12
xorq %rcx, %rax
movq %r14, %rbx
leaq (%r8,%r12,1), %r8
xorq %r13, %rbx
andq %rbx, %rdx
addq %rax, %r12
xorq %r14, %rdx
# rnd_all_2: 26-27
rorxq $14, %r8, %rax
rorxq $18, %r8, %rcx
addq %rdx, %r12
addq 208(%rsi), %r11
movq %r9, %rdx
xorq %rax, %rcx
xorq %r10, %rdx
rorxq $41, %r8, %rax
xorq %rcx, %rax
andq %r8, %rdx
addq %rax, %r11
rorxq $28, %r12, %rax
rorxq $34, %r12, %rcx
xorq %r10, %rdx
xorq %rax, %rcx
rorxq $39, %r12, %rax
addq %rdx, %r11
xorq %rcx, %rax
movq %r13, %rdx
addq %r11, %r15
xorq %r12, %rdx
andq %rdx, %rbx
addq %rax, %r11
xorq %r13, %rbx
rorxq $14, %r15, %rax
rorxq $18, %r15, %rcx
addq %rbx, %r11
addq 216(%rsi), %r10
movq %r8, %rbx
xorq %rax, %rcx
xorq %r9, %rbx
rorxq $41, %r15, %rax
xorq %rcx, %rax
andq %r15, %rbx
addq %rax, %r10
rorxq $28, %r11, %rax
rorxq $34, %r11, %rcx
xorq %r9, %rbx
xorq %rax, %rcx
rorxq $39, %r11, %rax
addq %rbx, %r10
xorq %rcx, %rax
movq %r12, %rbx
leaq (%r14,%r10,1), %r14
xorq %r11, %rbx
andq %rbx, %rdx
addq %rax, %r10
xorq %r12, %rdx
# rnd_all_2: 30-31
rorxq $14, %r14, %rax
rorxq $18, %r14, %rcx
addq %rdx, %r10
addq 240(%rsi), %r9
movq %r15, %rdx
xorq %rax, %rcx
xorq %r8, %rdx
rorxq $41, %r14, %rax
xorq %rcx, %rax
andq %r14, %rdx
addq %rax, %r9
rorxq $28, %r10, %rax
rorxq $34, %r10, %rcx
xorq %r8, %rdx
xorq %rax, %rcx
rorxq $39, %r10, %rax
addq %rdx, %r9
xorq %rcx, %rax
movq %r11, %rdx
addq %r9, %r13
xorq %r10, %rdx
andq %rdx, %rbx
addq %rax, %r9
xorq %r11, %rbx
rorxq $14, %r13, %rax
rorxq $18, %r13, %rcx
addq %rbx, %r9
addq 248(%rsi), %r8
movq %r14, %rbx
xorq %rax, %rcx
xorq %r15, %rbx
rorxq $41, %r13, %rax
xorq %rcx, %rax
andq %r13, %rbx
addq %rax, %r8
rorxq $28, %r9, %rax
rorxq $34, %r9, %rcx
xorq %r15, %rbx
xorq %rax, %rcx
rorxq $39, %r9, %rax
addq %rbx, %r8
xorq %rcx, %rax
movq %r10, %rbx
leaq (%r12,%r8,1), %r12
xorq %r9, %rbx
andq %rbx, %rdx
addq %rax, %r8
xorq %r10, %rdx
addq $0x100, %rsi
subq $0x01, %rbp
jnz L_sha512_len_avx2_rorx_tail
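        # tail finished: fold in the last Maj result, feed-forward into the
        # digest, advance the byte count at 224(%rdi) by 256 and loop while
        # input length remains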
addq %rdx, %r8
addq (%rdi), %r8
addq 8(%rdi), %r9
addq 16(%rdi), %r10
addq 24(%rdi), %r11
addq 32(%rdi), %r12
addq 40(%rdi), %r13
addq 48(%rdi), %r14
addq 56(%rdi), %r15
movq 224(%rdi), %rax
addq $0x100, %rax
subl $0x100, 1344(%rsp)
movq %rax, 224(%rdi)
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %r12, 32(%rdi)
movq %r13, 40(%rdi)
movq %r14, 48(%rdi)
movq %r15, 56(%rdi)
jnz L_sha512_len_avx2_rorx_begin
addq $0x548, %rsp
L_sha512_len_avx2_rorx_done:
xorq %rax, %rax
vzeroupper
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha512_AVX2_RORX_Len,.-Transform_Sha512_AVX2_RORX_Len
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
aerisarn/mesa-uwp | 89,612 | src/util/blake3/blake3_avx512_x86-64_unix.S
#include "mesa_blake3_visibility.h"
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
HIDDEN _blake3_hash_many_avx512
HIDDEN blake3_hash_many_avx512
HIDDEN blake3_compress_in_place_avx512
HIDDEN _blake3_compress_in_place_avx512
HIDDEN blake3_compress_xof_avx512
HIDDEN _blake3_compress_xof_avx512
.global _blake3_hash_many_avx512
.global blake3_hash_many_avx512
.global blake3_compress_in_place_avx512
.global _blake3_compress_in_place_avx512
.global blake3_compress_xof_avx512
.global _blake3_compress_xof_avx512
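# AVX-512 BLAKE3 kernels: a 16-way hash_many plus the single-block
# compress_in_place and compress_xof entry points declared above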
#ifdef __APPLE__
.text
#else
.section .text
#endif
.p2align 6
_blake3_hash_many_avx512:
blake3_hash_many_avx512:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 144
and rsp, 0xFFFFFFFFFFFFFFC0
neg r9
kmovw k1, r9d
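        # presumed SysV argument layout (matching blake3_hash_many):
        # rdi=inputs, rsi=num_inputs, rdx=blocks, rcx=key, r8=counter,
        # r9=increment_counter; negating r9 makes k1 all-ones when the
        # counter advances per input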
vmovd xmm0, r8d
vpbroadcastd ymm0, xmm0
shr r8, 32
vmovd xmm1, r8d
vpbroadcastd ymm1, xmm1
vmovdqa ymm4, ymm1
vmovdqa ymm5, ymm1
vpaddd ymm2, ymm0, ymmword ptr [ADD0+rip]
vpaddd ymm3, ymm0, ymmword ptr [ADD0+32+rip]
vpcmpltud k2, ymm2, ymm0
vpcmpltud k3, ymm3, ymm0
vpaddd ymm4 {k2}, ymm4, dword ptr [ADD1+rip] {1to8}
vpaddd ymm5 {k3}, ymm5, dword ptr [ADD1+rip] {1to8}
knotw k2, k1
vmovdqa32 ymm2 {k2}, ymm0
vmovdqa32 ymm3 {k2}, ymm0
vmovdqa32 ymm4 {k2}, ymm1
vmovdqa32 ymm5 {k2}, ymm1
vmovdqa ymmword ptr [rsp], ymm2
vmovdqa ymmword ptr [rsp+0x1*0x20], ymm3
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm4
vmovdqa ymmword ptr [rsp+0x3*0x20], ymm5
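        # per-lane 64-bit counters: low dwords at [rsp], high dwords at
        # [rsp+0x40], with the carry into the high half computed via the
        # unsigned compares above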
shl rdx, 6
mov qword ptr [rsp+0x80], rdx
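        # loop bound: total bytes per input = blocks * 64, kept at [rsp+0x80]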
cmp rsi, 16
jc 3f
2:
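        # outer loop: process a group of 16 inputs; the 8 key words are
        # broadcast across all lanes as the initial chaining value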
vpbroadcastd zmm0, dword ptr [rcx]
vpbroadcastd zmm1, dword ptr [rcx+0x1*0x4]
vpbroadcastd zmm2, dword ptr [rcx+0x2*0x4]
vpbroadcastd zmm3, dword ptr [rcx+0x3*0x4]
vpbroadcastd zmm4, dword ptr [rcx+0x4*0x4]
vpbroadcastd zmm5, dword ptr [rcx+0x5*0x4]
vpbroadcastd zmm6, dword ptr [rcx+0x6*0x4]
vpbroadcastd zmm7, dword ptr [rcx+0x7*0x4]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
.p2align 5
9:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
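        # eax carries this block's flags; flags_end (at [rbp+0x48]) is
        # merged in via cmove on each input's final block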
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm8, zmm16, zmm17
vpunpckhqdq zmm9, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm10, zmm18, zmm19
vpunpckhqdq zmm11, zmm18, zmm19
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm16, ymmword ptr [rdx+r8-0x2*0x20]
vinserti64x4 zmm16, zmm16, ymmword ptr [rdx+r12-0x2*0x20], 0x01
vmovdqu32 ymm17, ymmword ptr [rdx+r9-0x2*0x20]
vinserti64x4 zmm17, zmm17, ymmword ptr [rdx+r13-0x2*0x20], 0x01
vpunpcklqdq zmm12, zmm16, zmm17
vpunpckhqdq zmm13, zmm16, zmm17
vmovdqu32 ymm18, ymmword ptr [rdx+r10-0x2*0x20]
vinserti64x4 zmm18, zmm18, ymmword ptr [rdx+r14-0x2*0x20], 0x01
vmovdqu32 ymm19, ymmword ptr [rdx+r11-0x2*0x20]
vinserti64x4 zmm19, zmm19, ymmword ptr [rdx+r15-0x2*0x20], 0x01
vpunpcklqdq zmm14, zmm18, zmm19
vpunpckhqdq zmm15, zmm18, zmm19
vmovdqa32 zmm27, zmmword ptr [INDEX0+rip]
vmovdqa32 zmm31, zmmword ptr [INDEX1+rip]
vshufps zmm16, zmm8, zmm10, 136
vshufps zmm17, zmm12, zmm14, 136
vmovdqa32 zmm20, zmm16
vpermt2d zmm16, zmm27, zmm17
vpermt2d zmm20, zmm31, zmm17
vshufps zmm17, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm21, zmm17
vpermt2d zmm17, zmm27, zmm30
vpermt2d zmm21, zmm31, zmm30
vshufps zmm18, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm22, zmm18
vpermt2d zmm18, zmm27, zmm8
vpermt2d zmm22, zmm31, zmm8
vshufps zmm19, zmm9, zmm11, 221
vshufps zmm8, zmm13, zmm15, 221
vmovdqa32 zmm23, zmm19
vpermt2d zmm19, zmm27, zmm8
vpermt2d zmm23, zmm31, zmm8
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x40]
mov r13, qword ptr [rdi+0x48]
mov r14, qword ptr [rdi+0x50]
mov r15, qword ptr [rdi+0x58]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm8, zmm24, zmm25
vpunpckhqdq zmm9, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm10, zmm24, zmm25
vpunpckhqdq zmm11, zmm24, zmm25
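        # prefetch the next 64-byte block of each input stream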
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
mov r8, qword ptr [rdi+0x20]
mov r9, qword ptr [rdi+0x28]
mov r10, qword ptr [rdi+0x30]
mov r11, qword ptr [rdi+0x38]
mov r12, qword ptr [rdi+0x60]
mov r13, qword ptr [rdi+0x68]
mov r14, qword ptr [rdi+0x70]
mov r15, qword ptr [rdi+0x78]
vmovdqu32 ymm24, ymmword ptr [r8+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r12+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r9+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r13+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm12, zmm24, zmm25
vpunpckhqdq zmm13, zmm24, zmm25
vmovdqu32 ymm24, ymmword ptr [r10+rdx-0x1*0x20]
vinserti64x4 zmm24, zmm24, ymmword ptr [r14+rdx-0x1*0x20], 0x01
vmovdqu32 ymm25, ymmword ptr [r11+rdx-0x1*0x20]
vinserti64x4 zmm25, zmm25, ymmword ptr [r15+rdx-0x1*0x20], 0x01
vpunpcklqdq zmm14, zmm24, zmm25
vpunpckhqdq zmm15, zmm24, zmm25
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
vshufps zmm24, zmm8, zmm10, 136
vshufps zmm30, zmm12, zmm14, 136
vmovdqa32 zmm28, zmm24
vpermt2d zmm24, zmm27, zmm30
vpermt2d zmm28, zmm31, zmm30
vshufps zmm25, zmm8, zmm10, 221
vshufps zmm30, zmm12, zmm14, 221
vmovdqa32 zmm29, zmm25
vpermt2d zmm25, zmm27, zmm30
vpermt2d zmm29, zmm31, zmm30
vshufps zmm26, zmm9, zmm11, 136
vshufps zmm8, zmm13, zmm15, 136
vmovdqa32 zmm30, zmm26
vpermt2d zmm26, zmm27, zmm8
vpermt2d zmm30, zmm31, zmm8
vshufps zmm8, zmm9, zmm11, 221
vshufps zmm10, zmm13, zmm15, 221
vpermi2d zmm27, zmm8, zmm10
vpermi2d zmm31, zmm8, zmm10
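        # message words m0..m15 for all 16 lanes now sit transposed in
        # zmm16..zmm31; load the IV, counter, block-length and flags rows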
vpbroadcastd zmm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd zmm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd zmm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd zmm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa32 zmm12, zmmword ptr [rsp]
vmovdqa32 zmm13, zmmword ptr [rsp+0x1*0x40]
vpbroadcastd zmm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd zmm15, dword ptr [rsp+0x22*0x4]
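        # 7 BLAKE3 rounds of the G function across 16 lanes, using the
        # rotation constants 16, 12, 8, 7; the per-round message
        # permutation is baked into the order of the vpaddd operands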
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm24
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm23
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm17
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm29
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm22
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm27
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm21
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm30
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm20
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm21
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm16
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm28
vpaddd zmm1, zmm1, zmm25
vpaddd zmm2, zmm2, zmm31
vpaddd zmm3, zmm3, zmm30
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm26
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm23
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm16
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm18
vpaddd zmm1, zmm1, zmm19
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm25
vpaddd zmm1, zmm1, zmm27
vpaddd zmm2, zmm2, zmm24
vpaddd zmm3, zmm3, zmm31
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm28
vpaddd zmm3, zmm3, zmm17
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm29
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm18
vpaddd zmm3, zmm3, zmm20
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm19
vpaddd zmm1, zmm1, zmm26
vpaddd zmm2, zmm2, zmm22
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpaddd zmm0, zmm0, zmm27
vpaddd zmm1, zmm1, zmm21
vpaddd zmm2, zmm2, zmm17
vpaddd zmm3, zmm3, zmm24
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vprord zmm15, zmm15, 16
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 12
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vpaddd zmm0, zmm0, zmm31
vpaddd zmm1, zmm1, zmm16
vpaddd zmm2, zmm2, zmm25
vpaddd zmm3, zmm3, zmm22
vpaddd zmm0, zmm0, zmm4
vpaddd zmm1, zmm1, zmm5
vpaddd zmm2, zmm2, zmm6
vpaddd zmm3, zmm3, zmm7
vpxord zmm12, zmm12, zmm0
vpxord zmm13, zmm13, zmm1
vpxord zmm14, zmm14, zmm2
vpxord zmm15, zmm15, zmm3
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vprord zmm15, zmm15, 8
vpaddd zmm8, zmm8, zmm12
vpaddd zmm9, zmm9, zmm13
vpaddd zmm10, zmm10, zmm14
vpaddd zmm11, zmm11, zmm15
vpxord zmm4, zmm4, zmm8
vpxord zmm5, zmm5, zmm9
vpxord zmm6, zmm6, zmm10
vpxord zmm7, zmm7, zmm11
vprord zmm4, zmm4, 7
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vpaddd zmm0, zmm0, zmm30
vpaddd zmm1, zmm1, zmm18
vpaddd zmm2, zmm2, zmm19
vpaddd zmm3, zmm3, zmm23
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 16
vprord zmm12, zmm12, 16
vprord zmm13, zmm13, 16
vprord zmm14, zmm14, 16
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 12
vprord zmm6, zmm6, 12
vprord zmm7, zmm7, 12
vprord zmm4, zmm4, 12
vpaddd zmm0, zmm0, zmm26
vpaddd zmm1, zmm1, zmm28
vpaddd zmm2, zmm2, zmm20
vpaddd zmm3, zmm3, zmm29
vpaddd zmm0, zmm0, zmm5
vpaddd zmm1, zmm1, zmm6
vpaddd zmm2, zmm2, zmm7
vpaddd zmm3, zmm3, zmm4
vpxord zmm15, zmm15, zmm0
vpxord zmm12, zmm12, zmm1
vpxord zmm13, zmm13, zmm2
vpxord zmm14, zmm14, zmm3
vprord zmm15, zmm15, 8
vprord zmm12, zmm12, 8
vprord zmm13, zmm13, 8
vprord zmm14, zmm14, 8
vpaddd zmm10, zmm10, zmm15
vpaddd zmm11, zmm11, zmm12
vpaddd zmm8, zmm8, zmm13
vpaddd zmm9, zmm9, zmm14
vpxord zmm5, zmm5, zmm10
vpxord zmm6, zmm6, zmm11
vpxord zmm7, zmm7, zmm8
vpxord zmm4, zmm4, zmm9
vprord zmm5, zmm5, 7
vprord zmm6, zmm6, 7
vprord zmm7, zmm7, 7
vprord zmm4, zmm4, 7
vpxord zmm0, zmm0, zmm8
vpxord zmm1, zmm1, zmm9
vpxord zmm2, zmm2, zmm10
vpxord zmm3, zmm3, zmm11
vpxord zmm4, zmm4, zmm12
vpxord zmm5, zmm5, zmm13
vpxord zmm6, zmm6, zmm14
vpxord zmm7, zmm7, zmm15
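        # finalize: v0..v7 ^= v8..v15 gives the new chaining values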
movzx eax, byte ptr [rbp+0x38]
jne 9b
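        # all blocks consumed; transpose the 16 chaining values back to
        # row-major and store 32 bytes of output per input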
mov rbx, qword ptr [rbp+0x50]
vpunpckldq zmm16, zmm0, zmm1
vpunpckhdq zmm17, zmm0, zmm1
vpunpckldq zmm18, zmm2, zmm3
vpunpckhdq zmm19, zmm2, zmm3
vpunpckldq zmm20, zmm4, zmm5
vpunpckhdq zmm21, zmm4, zmm5
vpunpckldq zmm22, zmm6, zmm7
vpunpckhdq zmm23, zmm6, zmm7
vpunpcklqdq zmm0, zmm16, zmm18
vpunpckhqdq zmm1, zmm16, zmm18
vpunpcklqdq zmm2, zmm17, zmm19
vpunpckhqdq zmm3, zmm17, zmm19
vpunpcklqdq zmm4, zmm20, zmm22
vpunpckhqdq zmm5, zmm20, zmm22
vpunpcklqdq zmm6, zmm21, zmm23
vpunpckhqdq zmm7, zmm21, zmm23
vshufi32x4 zmm16, zmm0, zmm4, 0x88
vshufi32x4 zmm17, zmm1, zmm5, 0x88
vshufi32x4 zmm18, zmm2, zmm6, 0x88
vshufi32x4 zmm19, zmm3, zmm7, 0x88
vshufi32x4 zmm20, zmm0, zmm4, 0xDD
vshufi32x4 zmm21, zmm1, zmm5, 0xDD
vshufi32x4 zmm22, zmm2, zmm6, 0xDD
vshufi32x4 zmm23, zmm3, zmm7, 0xDD
vshufi32x4 zmm0, zmm16, zmm17, 0x88
vshufi32x4 zmm1, zmm18, zmm19, 0x88
vshufi32x4 zmm2, zmm20, zmm21, 0x88
vshufi32x4 zmm3, zmm22, zmm23, 0x88
vshufi32x4 zmm4, zmm16, zmm17, 0xDD
vshufi32x4 zmm5, zmm18, zmm19, 0xDD
vshufi32x4 zmm6, zmm20, zmm21, 0xDD
vshufi32x4 zmm7, zmm22, zmm23, 0xDD
vmovdqu32 zmmword ptr [rbx], zmm0
vmovdqu32 zmmword ptr [rbx+0x1*0x40], zmm1
vmovdqu32 zmmword ptr [rbx+0x2*0x40], zmm2
vmovdqu32 zmmword ptr [rbx+0x3*0x40], zmm3
vmovdqu32 zmmword ptr [rbx+0x4*0x40], zmm4
vmovdqu32 zmmword ptr [rbx+0x5*0x40], zmm5
vmovdqu32 zmmword ptr [rbx+0x6*0x40], zmm6
vmovdqu32 zmmword ptr [rbx+0x7*0x40], zmm7
vmovdqa32 zmm0, zmmword ptr [rsp]
vmovdqa32 zmm1, zmmword ptr [rsp+0x1*0x40]
vmovdqa32 zmm2, zmm0
vpaddd zmm2{k1}, zmm0, dword ptr [ADD16+rip] {1to16}
vpcmpltud k2, zmm2, zmm0
vpaddd zmm1 {k2}, zmm1, dword ptr [ADD1+rip] {1to16}
vmovdqa32 zmmword ptr [rsp], zmm2
vmovdqa32 zmmword ptr [rsp+0x1*0x40], zmm1
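        # advance the per-lane counters by 16 (masked by k1) with carry
        # into the high dwords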
add rdi, 128
add rbx, 512
mov qword ptr [rbp+0x50], rbx
sub rsi, 16
cmp rsi, 16
jnc 2b
test rsi, rsi
jnz 3f
4:
vzeroupper
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 6
3:
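        # remainder dispatch: fewer than 16 inputs left; run the 8-way ymm
        # kernel when at least 8 remain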
test esi, 0x8
je 3f
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x38]
movzx ebx, byte ptr [rbp+0x40]
or eax, ebx
xor edx, edx
2:
movzx ebx, byte ptr [rbp+0x48]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x80]
cmove eax, ebx
mov dword ptr [rsp+0x88], eax
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm16, ymm12, ymm14, 136
vshufps ymm17, ymm12, ymm14, 221
vshufps ymm18, ymm13, ymm15, 136
vshufps ymm19, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm20, ymm12, ymm14, 136
vshufps ymm21, ymm12, ymm14, 221
vshufps ymm22, ymm13, ymm15, 136
vshufps ymm23, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm24, ymm12, ymm14, 136
vshufps ymm25, ymm12, ymm14, 221
vshufps ymm26, ymm13, ymm15, 136
vshufps ymm27, ymm13, ymm15, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm28, ymm12, ymm14, 136
vshufps ymm29, ymm12, ymm14, 221
vshufps ymm30, ymm13, ymm15, 136
vshufps ymm31, ymm13, ymm15, 221
vpbroadcastd ymm8, dword ptr [BLAKE3_IV_0+rip]
vpbroadcastd ymm9, dword ptr [BLAKE3_IV_1+rip]
vpbroadcastd ymm10, dword ptr [BLAKE3_IV_2+rip]
vpbroadcastd ymm11, dword ptr [BLAKE3_IV_3+rip]
vmovdqa ymm12, ymmword ptr [rsp]
vmovdqa ymm13, ymmword ptr [rsp+0x40]
vpbroadcastd ymm14, dword ptr [BLAKE3_BLOCK_LEN+rip]
vpbroadcastd ymm15, dword ptr [rsp+0x88]
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm24
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm23
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm17
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm29
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm22
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm27
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm21
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm30
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm20
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm21
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm16
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm28
vpaddd ymm1, ymm1, ymm25
vpaddd ymm2, ymm2, ymm31
vpaddd ymm3, ymm3, ymm30
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm26
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm23
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm16
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm18
vpaddd ymm1, ymm1, ymm19
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm25
vpaddd ymm1, ymm1, ymm27
vpaddd ymm2, ymm2, ymm24
vpaddd ymm3, ymm3, ymm31
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm28
vpaddd ymm3, ymm3, ymm17
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm29
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm18
vpaddd ymm3, ymm3, ymm20
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm19
vpaddd ymm1, ymm1, ymm26
vpaddd ymm2, ymm2, ymm22
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpaddd ymm0, ymm0, ymm27
vpaddd ymm1, ymm1, ymm21
vpaddd ymm2, ymm2, ymm17
vpaddd ymm3, ymm3, ymm24
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vprord ymm15, ymm15, 16
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 12
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vpaddd ymm0, ymm0, ymm31
vpaddd ymm1, ymm1, ymm16
vpaddd ymm2, ymm2, ymm25
vpaddd ymm3, ymm3, ymm22
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxord ymm12, ymm12, ymm0
vpxord ymm13, ymm13, ymm1
vpxord ymm14, ymm14, ymm2
vpxord ymm15, ymm15, ymm3
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vprord ymm15, ymm15, 8
vpaddd ymm8, ymm8, ymm12
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxord ymm4, ymm4, ymm8
vpxord ymm5, ymm5, ymm9
vpxord ymm6, ymm6, ymm10
vpxord ymm7, ymm7, ymm11
vprord ymm4, ymm4, 7
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vpaddd ymm0, ymm0, ymm30
vpaddd ymm1, ymm1, ymm18
vpaddd ymm2, ymm2, ymm19
vpaddd ymm3, ymm3, ymm23
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 16
vprord ymm12, ymm12, 16
vprord ymm13, ymm13, 16
vprord ymm14, ymm14, 16
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 12
vprord ymm6, ymm6, 12
vprord ymm7, ymm7, 12
vprord ymm4, ymm4, 12
vpaddd ymm0, ymm0, ymm26
vpaddd ymm1, ymm1, ymm28
vpaddd ymm2, ymm2, ymm20
vpaddd ymm3, ymm3, ymm29
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxord ymm15, ymm15, ymm0
vpxord ymm12, ymm12, ymm1
vpxord ymm13, ymm13, ymm2
vpxord ymm14, ymm14, ymm3
vprord ymm15, ymm15, 8
vprord ymm12, ymm12, 8
vprord ymm13, ymm13, 8
vprord ymm14, ymm14, 8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm8, ymm13
vpaddd ymm9, ymm9, ymm14
vpxord ymm5, ymm5, ymm10
vpxord ymm6, ymm6, ymm11
vpxord ymm7, ymm7, ymm8
vpxord ymm4, ymm4, ymm9
vprord ymm5, ymm5, 7
vprord ymm6, ymm6, 7
vprord ymm7, ymm7, 7
vprord ymm4, ymm4, 7
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
movzx eax, byte ptr [rbp+0x38]
jne 2b
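# Eight inputs finished: transpose the eight row-major chaining values back
# into one 32-byte CV per input and store 256 bytes at the output pointer.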
mov rbx, qword ptr [rbp+0x50]
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
vmovdqa ymm0, ymmword ptr [rsp]
vmovdqa ymm2, ymmword ptr [rsp+0x2*0x20]
vmovdqa32 ymm0 {k1}, ymmword ptr [rsp+0x1*0x20]
vmovdqa32 ymm2 {k1}, ymmword ptr [rsp+0x3*0x20]
vmovdqa ymmword ptr [rsp], ymm0
vmovdqa ymmword ptr [rsp+0x2*0x20], ymm2
add rbx, 256
mov qword ptr [rbp+0x50], rbx
add rdi, 64
sub rsi, 8
3:
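# Fewer than eight inputs remain: rsi holds the remaining count, and its
# low bits select the narrower kernels below (4-way zmm, 2-way ymm, then
# a final 1-way xmm pass).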
mov rbx, qword ptr [rbp+0x50]
mov r15, qword ptr [rsp+0x80]
movzx r13, byte ptr [rbp+0x38]
movzx r12, byte ptr [rbp+0x48]
test esi, 0x4
je 3f
vbroadcasti32x4 zmm0, xmmword ptr [rcx]
vbroadcasti32x4 zmm1, xmmword ptr [rcx+0x1*0x10]
vmovdqa xmm12, xmmword ptr [rsp]
vmovdqa xmm13, xmmword ptr [rsp+0x4*0x10]
vpunpckldq xmm14, xmm12, xmm13
vpunpckhdq xmm15, xmm12, xmm13
vpermq ymm14, ymm14, 0xDC
vpermq ymm15, ymm15, 0xDC
vpbroadcastd zmm12, dword ptr [BLAKE3_BLOCK_LEN+rip]
vinserti64x4 zmm13, zmm14, ymm15, 0x01
mov eax, 17476
kmovw k2, eax
vpblendmd zmm13 {k2}, zmm13, zmm12
vbroadcasti32x4 zmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov eax, 43690
kmovw k3, eax
mov eax, 34952
kmovw k4, eax
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
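# Per-block loop: eax carries the flag byte for this block. A finalization
# bit (r12d) is OR-ed in speculatively, and cmovne discards it again on
# every block except the last one of the chunk (rdx == r15).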
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vmovdqa32 zmm2, zmm15
vpbroadcastd zmm8, dword ptr [rsp+0x22*0x4]
vpblendmd zmm3 {k4}, zmm13, zmm8
vmovups zmm8, zmmword ptr [r8+rdx-0x1*0x40]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x4*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x4*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x4*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x30]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x3*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x3*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x3*0x10], 0x03
vshufps zmm4, zmm8, zmm9, 136
vshufps zmm5, zmm8, zmm9, 221
vmovups zmm8, zmmword ptr [r8+rdx-0x20]
vinserti32x4 zmm8, zmm8, xmmword ptr [r9+rdx-0x2*0x10], 0x01
vinserti32x4 zmm8, zmm8, xmmword ptr [r10+rdx-0x2*0x10], 0x02
vinserti32x4 zmm8, zmm8, xmmword ptr [r11+rdx-0x2*0x10], 0x03
vmovups zmm9, zmmword ptr [r8+rdx-0x10]
vinserti32x4 zmm9, zmm9, xmmword ptr [r9+rdx-0x1*0x10], 0x01
vinserti32x4 zmm9, zmm9, xmmword ptr [r10+rdx-0x1*0x10], 0x02
vinserti32x4 zmm9, zmm9, xmmword ptr [r11+rdx-0x1*0x10], 0x03
vshufps zmm6, zmm8, zmm9, 136
vshufps zmm7, zmm8, zmm9, 221
vpshufd zmm6, zmm6, 0x93
vpshufd zmm7, zmm7, 0x93
mov al, 7
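# Seven rounds of the BLAKE3 compression: each iteration mixes the columns,
# rotates rows into diagonal position (vpshufd 0x93/0x4E/0x39), mixes the
# diagonals, rotates back, then permutes the message vectors for the next
# round.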
9:
vpaddd zmm0, zmm0, zmm4
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm5
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x93
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x39
vpaddd zmm0, zmm0, zmm6
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 16
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 12
vpaddd zmm0, zmm0, zmm7
vpaddd zmm0, zmm0, zmm1
vpxord zmm3, zmm3, zmm0
vprord zmm3, zmm3, 8
vpaddd zmm2, zmm2, zmm3
vpxord zmm1, zmm1, zmm2
vprord zmm1, zmm1, 7
vpshufd zmm0, zmm0, 0x39
vpshufd zmm3, zmm3, 0x4E
vpshufd zmm2, zmm2, 0x93
dec al
jz 9f
vshufps zmm8, zmm4, zmm5, 214
vpshufd zmm9, zmm4, 0x0F
vpshufd zmm4, zmm8, 0x39
vshufps zmm8, zmm6, zmm7, 250
vpblendmd zmm9 {k3}, zmm9, zmm8
vpunpcklqdq zmm8, zmm7, zmm5
vpblendmd zmm8 {k4}, zmm8, zmm6
vpshufd zmm8, zmm8, 0x78
vpunpckhdq zmm5, zmm5, zmm7
vpunpckldq zmm6, zmm6, zmm5
vpshufd zmm7, zmm6, 0x1E
vmovdqa32 zmm5, zmm9
vmovdqa32 zmm6, zmm8
jmp 9b
9:
vpxord zmm0, zmm0, zmm2
vpxord zmm1, zmm1, zmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vextracti32x4 xmmword ptr [rbx+0x4*0x10], zmm0, 0x02
vextracti32x4 xmmword ptr [rbx+0x5*0x10], zmm1, 0x02
vextracti32x4 xmmword ptr [rbx+0x6*0x10], zmm0, 0x03
vextracti32x4 xmmword ptr [rbx+0x7*0x10], zmm1, 0x03
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x40]
vmovdqa32 xmm0 {k1}, xmmword ptr [rsp+0x1*0x10]
vmovdqa32 xmm2 {k1}, xmmword ptr [rsp+0x5*0x10]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x40], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test esi, 0x2
je 3f
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x40], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x4]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x44], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x88], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x88]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 16
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 12
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxord ymm3, ymm3, ymm0
vprord ymm3, ymm3, 8
vpaddd ymm2, ymm2, ymm3
vpxord ymm1, ymm1, ymm2
vprord ymm1, ymm1, 7
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqa xmm0, xmmword ptr [rsp]
vmovdqa xmm2, xmmword ptr [rsp+0x4*0x10]
vmovdqu32 xmm0 {k1}, xmmword ptr [rsp+0x8]
vmovdqu32 xmm2 {k1}, xmmword ptr [rsp+0x48]
vmovdqa xmmword ptr [rsp], xmm0
vmovdqa xmmword ptr [rsp+0x4*0x10], xmm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
test esi, 0x1
je 4b
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm14, dword ptr [rsp]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x40], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm15, xmmword ptr [BLAKE3_IV+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vpinsrd xmm3, xmm14, eax, 3
vmovdqa xmm2, xmm15
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
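# Single-block compression, in place. Register use follows the SysV x86-64
# ABI for the assumed C prototype
#   blake3_compress_in_place_avx512(uint32_t cv[8], const uint8_t block[64],
#                                   uint8_t block_len, uint64_t counter,
#                                   uint8_t flags)
# so rdi = chaining value (overwritten), rsi = block, dl = block length,
# rcx = counter, r8b = flags.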
_blake3_compress_in_place_avx512:
blake3_compress_in_place_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vmovdqu xmmword ptr [rdi], xmm0
vmovdqu xmmword ptr [rdi+0x10], xmm1
ret
.p2align 6
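# Single-block compression with 64 bytes of extended output. Same register
# assignments as the in-place variant above, plus (assumed) r9 = output
# pointer; the chaining value at [rdi] is read but left unmodified.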
_blake3_compress_xof_avx512:
blake3_compress_xof_avx512:
_CET_ENDBR
vmovdqu xmm0, xmmword ptr [rdi]
vmovdqu xmm1, xmmword ptr [rdi+0x10]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
vmovq xmm3, rcx
vmovq xmm4, rdx
vpunpcklqdq xmm3, xmm3, xmm4
vmovaps xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovups xmm8, xmmword ptr [rsi]
vmovups xmm9, xmmword ptr [rsi+0x10]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [rsi+0x20]
vmovups xmm9, xmmword ptr [rsi+0x30]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 16
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 12
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxord xmm3, xmm3, xmm0
vprord xmm3, xmm3, 8
vpaddd xmm2, xmm2, xmm3
vpxord xmm1, xmm1, xmm2
vprord xmm1, xmm1, 7
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
vpxor xmm2, xmm2, [rdi]
vpxor xmm3, xmm3, [rdi+0x10]
vmovdqu xmmword ptr [r9], xmm0
vmovdqu xmmword ptr [r9+0x10], xmm1
vmovdqu xmmword ptr [r9+0x20], xmm2
vmovdqu xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
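# Constant tables for the wide hashing kernels (best-effort description):
# INDEX0/INDEX1 look like dword permutation indices for the lane
# transposes, ADD0 holds the per-lane counter offsets 0..15, and
# ADD1/ADD16 are counter increments for one and sixteen inputs.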
INDEX0:
.long 0, 1, 2, 3, 16, 17, 18, 19
.long 8, 9, 10, 11, 24, 25, 26, 27
INDEX1:
.long 4, 5, 6, 7, 20, 21, 22, 23
.long 12, 13, 14, 15, 28, 29, 30, 31
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
.long 8, 9, 10, 11, 12, 13, 14, 15
ADD1: .long 1
ADD16: .long 16
BLAKE3_BLOCK_LEN:
.long 64
.p2align 6
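# First four words of the BLAKE3 IV; identical to the SHA-256 initial hash
# values H0..H3 (fractional parts of the square roots of 2, 3, 5 and 7).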
BLAKE3_IV:
BLAKE3_IV_0:
.long 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A
/* sha256_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
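/*
 * Illustration (hypothetical file contents, not taken from wolfSSL): a
 * user_settings.h line such as
 *     typedef unsigned int my_word32;   <- C code, breaks the assembler
 * would be stripped by the script, while a line such as
 *     #define WOLFSSL_SHA512            <- preprocessor only, asm-safe
 * would be carried over into user_settings_asm.h.
 */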
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_X86_64_BUILD
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_sse2_sha256_sha_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
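# pshufb mask that byte-swaps each 32-bit word, converting the big-endian
# message words into the CPU's little-endian lane order.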
L_sse2_sha256_shuf_mask:
.quad 0x405060700010203, 0xc0d0e0f08090a0b
#ifndef __APPLE__
.text
.globl Transform_Sha256_SSE2_Sha
.type Transform_Sha256_SSE2_Sha,@function
.align 16
Transform_Sha256_SSE2_Sha:
#else
.section __TEXT,__text
.globl _Transform_Sha256_SSE2_Sha
.p2align 4
_Transform_Sha256_SSE2_Sha:
#endif /* __APPLE__ */
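# Register use (SysV ABI; assuming wolfSSL's usual transform prototype
# taking (wc_Sha256* sha256, const byte* data)): %rdi = state with the
# digest at offset 0, %rsi = one 64-byte block. The digest is loaded into
# %xmm1/%xmm2 in the ABEF/CDGH dword order that sha256rnds2 expects, and
# stored back through the inverse shuffle at the end.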
leaq L_sse2_sha256_sha_k(%rip), %rdx
movdqa L_sse2_sha256_shuf_mask(%rip), %xmm10
movq (%rdi), %xmm1
movq 8(%rdi), %xmm2
movhpd 16(%rdi), %xmm1
movhpd 24(%rdi), %xmm2
pshufd $27, %xmm1, %xmm1
pshufd $27, %xmm2, %xmm2
movdqu (%rsi), %xmm3
movdqu 16(%rsi), %xmm4
movdqu 32(%rsi), %xmm5
movdqu 48(%rsi), %xmm6
pshufb %xmm10, %xmm3
movdqa %xmm1, %xmm8
movdqa %xmm2, %xmm9
# Rounds: 0-3
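# sha256rnds2 performs two rounds per issue and reads the precomputed
# W[i]+K[i] values implicitly from %xmm0; the pshufd $14 moves the upper
# two W+K dwords into the low half of %xmm0 for the second instruction.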
movdqa %xmm3, %xmm0
paddd (%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
# Rounds: 4-7
pshufb %xmm10, %xmm4
movdqa %xmm4, %xmm0
paddd 16(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 8-11
pshufb %xmm10, %xmm5
movdqa %xmm5, %xmm0
paddd 32(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 12-15
pshufb %xmm10, %xmm6
movdqa %xmm6, %xmm0
paddd 48(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 16-19
movdqa %xmm3, %xmm0
paddd 64(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 20-23
movdqa %xmm4, %xmm0
paddd 80(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 24-27
movdqa %xmm5, %xmm0
paddd 96(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 28-31
movdqa %xmm6, %xmm0
paddd 112(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 32-35
movdqa %xmm3, %xmm0
paddd 128(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 36-39
movdqa %xmm4, %xmm0
paddd 144(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 40-43
movdqa %xmm5, %xmm0
paddd 160(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 44-47
movdqa %xmm6, %xmm0
paddd 176(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 48-51
movdqa %xmm3, %xmm0
paddd 192(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 52-63
movdqa %xmm4, %xmm0
paddd 208(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
movdqa %xmm5, %xmm0
paddd 224(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
movdqa %xmm6, %xmm0
paddd 240(%rdx), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
paddd %xmm8, %xmm1
paddd %xmm9, %xmm2
pshufd $27, %xmm1, %xmm1
pshufd $27, %xmm2, %xmm2
movq %xmm1, (%rdi)
movq %xmm2, 8(%rdi)
movhpd %xmm1, 16(%rdi)
movhpd %xmm2, 24(%rdi)
xorq %rax, %rax
vzeroupper
repz retq
#ifndef __APPLE__
.size Transform_Sha256_SSE2_Sha,.-Transform_Sha256_SSE2_Sha
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha256_SSE2_Sha_Len
.type Transform_Sha256_SSE2_Sha_Len,@function
.align 16
Transform_Sha256_SSE2_Sha_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_SSE2_Sha_Len
.p2align 4
_Transform_Sha256_SSE2_Sha_Len:
#endif /* __APPLE__ */
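# As above, but for %edx bytes of input (assumed to be a multiple of 64):
# %rdi = state, %rsi = message, %edx = length; one 64-byte block is
# consumed per loop iteration until %edx reaches zero.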
leaq L_sse2_sha256_sha_k(%rip), %rax
movdqa L_sse2_sha256_shuf_mask(%rip), %xmm10
movq (%rdi), %xmm1
movq 8(%rdi), %xmm2
movhpd 16(%rdi), %xmm1
movhpd 24(%rdi), %xmm2
pshufd $27, %xmm1, %xmm1
pshufd $27, %xmm2, %xmm2
# Start of loop processing a block
L_sha256_sha_len_sse2_start:
movdqu (%rsi), %xmm3
movdqu 16(%rsi), %xmm4
movdqu 32(%rsi), %xmm5
movdqu 48(%rsi), %xmm6
pshufb %xmm10, %xmm3
movdqa %xmm1, %xmm8
movdqa %xmm2, %xmm9
# Rounds: 0-3
movdqa %xmm3, %xmm0
paddd (%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
# Rounds: 4-7
pshufb %xmm10, %xmm4
movdqa %xmm4, %xmm0
paddd 16(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 8-11
pshufb %xmm10, %xmm5
movdqa %xmm5, %xmm0
paddd 32(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 12-15
pshufb %xmm10, %xmm6
movdqa %xmm6, %xmm0
paddd 48(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 16-19
movdqa %xmm3, %xmm0
paddd 64(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 20-23
movdqa %xmm4, %xmm0
paddd 80(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 24-27
movdqa %xmm5, %xmm0
paddd 96(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 28-31
movdqa %xmm6, %xmm0
paddd 112(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 32-35
movdqa %xmm3, %xmm0
paddd 128(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 36-39
movdqa %xmm4, %xmm0
paddd 144(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 40-43
movdqa %xmm5, %xmm0
paddd 160(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 44-47
movdqa %xmm6, %xmm0
paddd 176(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm6, %xmm7
palignr $4, %xmm5, %xmm7
paddd %xmm7, %xmm3
sha256msg2 %xmm6, %xmm3
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 48-51
movdqa %xmm3, %xmm0
paddd 192(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm3, %xmm7
palignr $4, %xmm6, %xmm7
paddd %xmm7, %xmm4
sha256msg2 %xmm3, %xmm4
pshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 52-63
movdqa %xmm4, %xmm0
paddd 208(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm4, %xmm7
palignr $4, %xmm3, %xmm7
paddd %xmm7, %xmm5
sha256msg2 %xmm4, %xmm5
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
movdqa %xmm5, %xmm0
paddd 224(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
movdqa %xmm5, %xmm7
palignr $4, %xmm4, %xmm7
paddd %xmm7, %xmm6
sha256msg2 %xmm5, %xmm6
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
movdqa %xmm6, %xmm0
paddd 240(%rax), %xmm0
sha256rnds2 %xmm1, %xmm2
pshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
addq $0x40, %rsi
subl $0x40, %edx
paddd %xmm8, %xmm1
paddd %xmm9, %xmm2
jnz L_sha256_sha_len_sse2_start
pshufd $27, %xmm1, %xmm1
pshufd $27, %xmm2, %xmm2
movq %xmm1, (%rdi)
movq %xmm2, 8(%rdi)
movhpd %xmm1, 16(%rdi)
movhpd %xmm2, 24(%rdi)
xorq %rax, %rax
vzeroupper
repz retq
#ifndef __APPLE__
.size Transform_Sha256_SSE2_Sha_Len,.-Transform_Sha256_SSE2_Sha_Len
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX1
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_avx1_sha256_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
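# pshufb mask packing dwords 0 and 2 of the source into the low 64 bits
# and zeroing the upper half (the "00BA" layout of the sigma results).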
L_avx1_sha256_shuf_00BA:
.quad 0xb0a090803020100, 0xffffffffffffffff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
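# Companion mask placing dwords 0 and 2 in the high 64 bits instead (the
# "DC00" layout); the two zero-padded halves are then merged by addition.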
L_avx1_sha256_shuf_DC00:
.quad 0xffffffffffffffff, 0xb0a090803020100
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
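# Byte-swap mask (same pattern as L_sse2_sha256_shuf_mask above) used by
# vpshufb to load the big-endian message words.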
L_avx1_sha256_flip_mask:
.quad 0x405060700010203, 0xc0d0e0f08090a0b
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1
.type Transform_Sha256_AVX1,@function
.align 16
Transform_Sha256_AVX1:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1
.p2align 4
_Transform_Sha256_AVX1:
#endif /* __APPLE__ */
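# Four-lane AVX1 message schedule interleaved with scalar rounds: the
# working variables a..h live in %r8d-%r15d, %rbp points at the K table,
# and the 64 bytes reserved on the stack hold the current W[i]+K[i]
# values written by the set_w_k_xfer_4 blocks.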
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
subq $0x40, %rsp
leaq L_avx1_sha256_k(%rip), %rbp
vmovdqa L_avx1_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx1_sha256_shuf_00BA(%rip), %xmm11
vmovdqa L_avx1_sha256_shuf_DC00(%rip), %xmm12
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
movl %r9d, %ebx
movl %r12d, %edx
xorl %r10d, %ebx
# set_w_k_xfer_4: 0
vpaddd (%rbp), %xmm0, %xmm4
vpaddd 16(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 32(%rbp), %xmm2, %xmm6
vpaddd 48(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 4
vpaddd 64(%rbp), %xmm0, %xmm4
vpaddd 80(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 96(%rbp), %xmm2, %xmm6
vpaddd 112(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 8
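# The four vpaddd/vmovdqu pairs below add round constants K[32..47]
# (bytes 128..191 of the table at %rbp) to the freshly scheduled
# W[32..47] in xmm0..xmm3 and spill the sums to the 64-byte stack
# scratch area, so each round only needs a single addl from (%rsp).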
vpaddd 128(%rbp), %xmm0, %xmm4
vpaddd 144(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 160(%rbp), %xmm2, %xmm6
vpaddd 176(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 12
vpaddd 192(%rbp), %xmm0, %xmm4
vpaddd 208(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 224(%rbp), %xmm2, %xmm6
vpaddd 240(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# rnd_all_4: 0-3
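# From here the message schedule is complete: the four rnd_all_4
# groups (tagged 0-3, 1-4, 2-5, 3-6 by the generator) are the plain
# scalar rounds 48-51, 52-55, 56-59 and 60-63, consuming W+K from
# (%rsp)..60(%rsp) with no vector work interleaved.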
addl (%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 4(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 8(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 12(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 1-4
addl 16(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 20(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 24(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 28(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 2-5
addl 32(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 36(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 40(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 44(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 3-6
addl 48(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 56(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
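# Davies-Meyer feed-forward: add the working variables a..h
# (r8d..r15d) back into the hash state pointed to by %rdi.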
addl %r8d, (%rdi)
addl %r9d, 4(%rdi)
addl %r10d, 8(%rdi)
addl %r11d, 12(%rdi)
addl %r12d, 16(%rdi)
addl %r13d, 20(%rdi)
addl %r14d, 24(%rdi)
addl %r15d, 28(%rdi)
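# Return 0 in %rax.  vzeroupper clears any dirty upper YMM state
# before returning to possibly non-VEX code, and the rep-prefixed ret
# is the usual two-byte return that avoids branch-prediction penalties
# on some CPUs (rationale assumed; the source does not state it).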
xorq %rax, %rax
vzeroupper
addq $0x40, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1,.-Transform_Sha256_AVX1
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1_Len
.type Transform_Sha256_AVX1_Len,@function
.align 16
Transform_Sha256_AVX1_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1_Len
.p2align 4
_Transform_Sha256_AVX1_Len:
#endif /* __APPLE__ */
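# System V AMD64 calling convention: %rdi points at the SHA-256 state
# (digest words first, judging by the 0..28(%rdi) loads below), %rsi
# at the input data, and %rdx holds the byte length, presumably a
# multiple of the 64-byte block size.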
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %rbp
subq $0x44, %rsp
movl %ebp, 64(%rsp)
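# The byte length is stashed at 64(%rsp), just above the 64-byte W+K
# scratch area (hence the 0x44-byte frame), because %rbp is about to
# be repurposed as the constant-table pointer.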
leaq L_avx1_sha256_k(%rip), %rbp
vmovdqa L_avx1_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx1_sha256_shuf_00BA(%rip), %xmm11
vmovdqa L_avx1_sha256_shuf_DC00(%rip), %xmm12
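# %rbp now addresses the K[0..63] round-constant table; xmm13 is the
# byte-flip mask that converts each loaded 32-bit word to big-endian,
# and xmm11/xmm12 (shuf_00BA/shuf_DC00) are the shuffle masks used to
# re-pack the two-lane sigma1 results during message scheduling.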
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
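# Working-variable assignment for the round code: r8d..r15d hold
# a,b,c,d,e,f,g,h respectively.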
# Start of loop processing a block
L_sha256_len_avx1_start:
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
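# vpshufb with the flip mask byte-swaps each 32-bit word, yielding the
# big-endian message words W[0..15] that SHA-256 operates on.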
movl %r9d, %ebx
movl %r12d, %edx
xorl %r10d, %ebx
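# Round-loop invariants: %edx seeds the Sigma1 rotate chain with e,
# and %ebx starts as b ^ c so that Maj(a,b,c) = ((a^b) & (b^c)) ^ b
# costs one andl and one xorl per round; eax and ebx alternate
# carrying the a ^ b value from round to round.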
# set_w_k_xfer_4: 0
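# First transfer: add K[0..15] to the raw message words and park the
# sums on the stack; these feed rounds 0-15 while the vector units
# schedule W[16..31] in parallel.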
vpaddd (%rbp), %xmm0, %xmm4
vpaddd 16(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 32(%rbp), %xmm2, %xmm6
vpaddd 48(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 4
vpaddd 64(%rbp), %xmm0, %xmm4
vpaddd 80(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 96(%rbp), %xmm2, %xmm6
vpaddd 112(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 8
vpaddd 128(%rbp), %xmm0, %xmm4
vpaddd 144(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 160(%rbp), %xmm2, %xmm6
vpaddd 176(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm0, %xmm1, %xmm5
vpalignr $4, %xmm2, %xmm3, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm1, %xmm2, %xmm5
vpalignr $4, %xmm3, %xmm0, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 16(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 20(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 24(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 28(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm2, %xmm3, %xmm5
vpalignr $4, %xmm0, %xmm1, %xmm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 32(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 36(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 40(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 44(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %xmm3, %xmm0, %xmm5
vpalignr $4, %xmm1, %xmm2, %xmm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 48(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %xmm5, %xmm8
vpslld $14, %xmm5, %xmm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %xmm6, %xmm7, %xmm6
vpor %xmm8, %xmm9, %xmm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 52(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %xmm5, %xmm9
vpxor %xmm6, %xmm8, %xmm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %xmm6, %xmm9, %xmm5
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 56(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %xmm6, %xmm7, %xmm6
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 60(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %xmm6, %xmm8
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %xmm6, %xmm9
vpxor %xmm8, %xmm7, %xmm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %xmm9, %xmm8, %xmm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 12
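# Add the round constants K[48..63] (the 192..240(%rbp) slice of the K
# table) to the freshly scheduled words and park the four W+K vectors
# on the stack; each scalar round below then picks up its term with a
# single addl from (%rsp).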
vpaddd 192(%rbp), %xmm0, %xmm4
vpaddd 208(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 224(%rbp), %xmm2, %xmm6
vpaddd 240(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
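# The message schedule is complete; rounds 48-63 below consume only the
# W+K values already spilled to the stack, so they run as straight
# scalar code with no vector interleave.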
# rnd_all_4: 0-3
addl (%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 4(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 8(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 12(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 4-7
addl 16(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 20(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 24(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 28(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 8-11
addl 32(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 36(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 40(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 44(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 12-15
addl 48(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 56(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
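# End of the 64 rounds: fold the working registers back into the hash
# state (the Davies-Meyer feed-forward), advance the message pointer,
# count down the remaining length kept at 64(%rsp), and loop while
# whole blocks remain.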
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
addq $0x40, %rsi
subl $0x40, 64(%rsp)
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
jnz L_sha256_len_avx1_start
xorq %rax, %rax
vzeroupper
addq $0x44, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1_Len,.-Transform_Sha256_AVX1_Len
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
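# SHA-256 round constants K[0..63] (FIPS 180-4: the first 32 bits of
# the fractional parts of the cube roots of the first 64 primes), used
# by the RORX transforms below.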
L_avx1_rorx_sha256_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
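# Packs the two sigma1 results left in dwords 0 and 2 into the low two
# lanes, zeroing the high lanes (0xff selectors), for schedule words
# B/A of a 4-word group.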
L_avx1_rorx_sha256_shuf_00BA:
.quad 0x0b0a090803020100, 0xffffffffffffffff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
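# The complementary pack: results go to the high two lanes (words D/C)
# with the low half zeroed, so the two halves combine with a plain
# vpaddd.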
L_avx1_rorx_sha256_shuf_DC00:
.quad 0xffffffffffffffff, 0x0b0a090803020100
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
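# pshufb mask that byte-swaps each 32-bit word, turning the
# little-endian message load into the big-endian word order SHA-256
# expects.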
L_avx1_rorx_sha256_flip_mask:
.quad 0x0405060700010203, 0x0c0d0e0f08090a0b
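# Transform_Sha256_AVX1_RORX: compress one 64-byte block.
#   rdi = SHA-256 state h[0..7] (updated in place), rsi = message block.
# Same AVX1 message scheduling as above, but the scalar rounds use the
# BMI2 rorx instruction, which writes the rotation to a separate
# destination without touching the flags and so saves most of the
# register copies the rorl version needs.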
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1_RORX
.type Transform_Sha256_AVX1_RORX,@function
.align 16
Transform_Sha256_AVX1_RORX:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1_RORX
.p2align 4
_Transform_Sha256_AVX1_RORX:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
subq $0x40, %rsp
leaq L_avx1_rorx_sha256_k(%rip), %rbp
vmovdqa L_avx1_rorx_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx1_rorx_sha256_shuf_00BA(%rip), %xmm11
vmovdqa L_avx1_rorx_sha256_shuf_DC00(%rip), %xmm12
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
# set_w_k_xfer_4: 0
vpaddd (%rbp), %xmm0, %xmm4
vpaddd 16(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 32(%rbp), %xmm2, %xmm6
vpaddd 48(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
movl %r9d, %ebx
rorxl $6, %r12d, %edx
xorl %r10d, %ebx
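# Round recipe: Sigma1(e) = ROTR6 ^ ROTR11 ^ ROTR25 and Sigma0(a) =
# ROTR2 ^ ROTR13 ^ ROTR22 are built in edx from rorx results;
# Ch(e,f,g) is formed as ((f ^ g) & e) ^ g, and Maj(a,b,c) as
# ((a ^ b) & (b ^ c)) ^ b, with b ^ c carried across rounds in
# ebx/eax so Maj costs one and plus one xor per round.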
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 4
vpaddd 64(%rbp), %xmm0, %xmm4
vpaddd 80(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 96(%rbp), %xmm2, %xmm6
vpaddd 112(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 8
vpaddd 128(%rbp), %xmm0, %xmm4
vpaddd 144(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 160(%rbp), %xmm2, %xmm6
vpaddd 176(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 12
vpaddd 192(%rbp), %xmm0, %xmm4
vpaddd 208(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 224(%rbp), %xmm2, %xmm6
vpaddd 240(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
xorl %eax, %eax
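# The rnd_all_4 groups below defer each round's final "a += Maj" to the
# top of the following round; eax is cleared here so the first deferred
# add is a no-op and the unrolled pattern stays uniform.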
# rnd_all_4: 0-3
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
addl %eax, %r8d
addl (%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 4(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
addl %eax, %r14d
addl 8(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 12(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
# rnd_all_4: 4-7
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
addl %eax, %r12d
addl 16(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 20(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
addl %eax, %r10d
addl 24(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 28(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
# rnd_all_4: 8-11
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
addl %eax, %r8d
addl 32(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 36(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
addl %eax, %r14d
addl 40(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 44(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
# rnd_all_4: 12-15
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
addl %eax, %r12d
addl 48(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
addl %eax, %r10d
addl 56(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
addl %eax, %r8d
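# Deferred Maj of round 63 added above; single-block variant, so the
# working registers are folded straight into the state in memory.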
addl %r8d, (%rdi)
addl %r9d, 4(%rdi)
addl %r10d, 8(%rdi)
addl %r11d, 12(%rdi)
addl %r12d, 16(%rdi)
addl %r13d, 20(%rdi)
addl %r14d, 24(%rdi)
addl %r15d, 28(%rdi)
xorq %rax, %rax
vzeroupper
addq $0x40, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1_RORX,.-Transform_Sha256_AVX1_RORX
#endif /* __APPLE__ */
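# Transform_Sha256_AVX1_RORX_Len: compress a sequence of blocks.
#   rdi = SHA-256 state, rsi = message, rdx = byte count (expected to
#   be a whole number of 64-byte blocks).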
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1_RORX_Len
.type Transform_Sha256_AVX1_RORX_Len,@function
.align 16
Transform_Sha256_AVX1_RORX_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1_RORX_Len
.p2align 4
_Transform_Sha256_AVX1_RORX_Len:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %rbp
subq $0x44, %rsp
movl %ebp, 64(%rsp)
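# The remaining byte count lives at 64(%rsp); rbp is then repurposed as
# the round-constant table pointer.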
leaq L_avx1_rorx_sha256_k(%rip), %rbp
vmovdqa L_avx1_rorx_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx1_rorx_sha256_shuf_00BA(%rip), %xmm11
vmovdqa L_avx1_rorx_sha256_shuf_DC00(%rip), %xmm12
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
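# The working variables a..h live in %r8d..%r15d for the whole call; they
# are written back to (%rdi) only at the end of each block.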
# Start of loop processing a block
L_sha256_len_avx1_len_rorx_start:
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
# set_w_k_xfer_4: 0
vpaddd (%rbp), %xmm0, %xmm4
vpaddd 16(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 32(%rbp), %xmm2, %xmm6
vpaddd 48(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
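# set_w_k_xfer_4 precomputes W[t] + K[t] for t = 0..15 and spills the sums
# to the stack; each scalar round below picks up its word from t*4(%rsp).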
movl %r9d, %ebx
rorxl $6, %r12d, %edx
xorl %r10d, %ebx
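# Each msg_sched block interleaves four compression rounds with the SHA-256
# message schedule W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]:
# sigma0(x) = ROTR7 ^ ROTR18 ^ SHR3 is built from the vpsrld $7/vpslld $25,
# vpsrld $18/vpslld $14 and vpsrld $3 steps; sigma1(x) = ROTR17 ^ ROTR19 ^
# SHR10 is realized two words at a time with the vpsrlq $0x11/$19 and
# vpsrld $10 steps after the vpshufd $0xfa/$0x50 expansions.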
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 4
vpaddd 64(%rbp), %xmm0, %xmm4
vpaddd 80(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 96(%rbp), %xmm2, %xmm6
vpaddd 112(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 8
vpaddd 128(%rbp), %xmm0, %xmm4
vpaddd 144(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 160(%rbp), %xmm2, %xmm6
vpaddd 176(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %xmm2, %xmm3, %xmm4
vpalignr $4, %xmm0, %xmm1, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm3, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm0, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm0
# msg_sched done: 0-3
# msg_sched: 4-7
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 16(%rsp), %r11d
vpalignr $4, %xmm3, %xmm0, %xmm4
vpalignr $4, %xmm1, %xmm2, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 20(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm0, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 24(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm1, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 28(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm1
# msg_sched done: 4-7
# msg_sched: 8-11
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 32(%rsp), %r15d
vpalignr $4, %xmm0, %xmm1, %xmm4
vpalignr $4, %xmm2, %xmm3, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 36(%rsp), %r14d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpshufd $0xfa, %xmm1, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 40(%rsp), %r13d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm2, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 44(%rsp), %r12d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vpaddd %xmm4, %xmm9, %xmm2
# msg_sched done: 8-11
# msg_sched: 12-15
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 48(%rsp), %r11d
vpalignr $4, %xmm1, %xmm2, %xmm4
vpalignr $4, %xmm3, %xmm0, %xmm5
# rnd_0: 1 - 2
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %xmm5, %xmm6
vpslld $25, %xmm5, %xmm7
# rnd_0: 3 - 4
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $3, %xmm5, %xmm8
vpor %xmm6, %xmm7, %xmm7
# rnd_0: 5 - 7
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 52(%rsp), %r10d
vpsrld $18, %xmm5, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpslld $14, %xmm5, %xmm5
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpxor %xmm5, %xmm7, %xmm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %xmm6, %xmm7, %xmm7
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpshufd $0xfa, %xmm2, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
vpxor %xmm8, %xmm7, %xmm5
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrld $10, %xmm6, %xmm8
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 56(%rsp), %r9d
vpsrlq $19, %xmm6, %xmm7
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpsrlq $0x11, %xmm6, %xmm6
vpaddd %xmm3, %xmm4, %xmm4
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %xmm5, %xmm4, %xmm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpxor %xmm7, %xmm6, %xmm6
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpxor %xmm6, %xmm8, %xmm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufb %xmm11, %xmm8, %xmm8
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpaddd %xmm8, %xmm4, %xmm4
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 60(%rsp), %r8d
vpshufd $0x50, %xmm4, %xmm6
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpsrld $10, %xmm6, %xmm9
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpsrlq $19, %xmm6, %xmm7
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpsrlq $0x11, %xmm6, %xmm6
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpxor %xmm7, %xmm6, %xmm6
# rnd_1: 5 - 5
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
vpxor %xmm6, %xmm9, %xmm9
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
vpshufb %xmm12, %xmm9, %xmm9
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vpaddd %xmm4, %xmm9, %xmm3
# msg_sched done: 12-15
# set_w_k_xfer_4: 12
vpaddd 192(%rbp), %xmm0, %xmm4
vpaddd 208(%rbp), %xmm1, %xmm5
vmovdqu %xmm4, (%rsp)
vmovdqu %xmm5, 16(%rsp)
vpaddd 224(%rbp), %xmm2, %xmm6
vpaddd 240(%rbp), %xmm3, %xmm7
vmovdqu %xmm6, 32(%rsp)
vmovdqu %xmm7, 48(%rsp)
xorl %eax, %eax
xorl %ecx, %ecx
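# rnd_all_4: the last 16 rounds need no message scheduling. Each round is
# h += Sigma1(e) + Ch(e,f,g) + (W[t]+K[t]); d += h; h += Sigma0(a) + Maj(a,b,c),
# with Sigma1 = ROTR6^ROTR11^ROTR25 and Sigma0 = ROTR2^ROTR13^ROTR22 built
# from flag-free rorx rotates. Ch is folded as ((f ^ g) & e) ^ g, and Maj as
# ((a ^ b) & (b ^ c)) ^ b, carried across rounds in %eax/%ebx; %eax is zeroed
# here so the round template's leading addl %eax, %r8d is a no-op.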
# rnd_all_4: 0-3 (rounds 48-51)
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
addl %eax, %r8d
addl (%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 4(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
addl %eax, %r14d
addl 8(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 12(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
# rnd_all_4: 1-4 (rounds 52-55)
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
addl %eax, %r12d
addl 16(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 20(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
addl %eax, %r10d
addl 24(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 28(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
# rnd_all_4: 2-5 (rounds 56-59)
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
addl %eax, %r8d
addl 32(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 36(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
addl %r14d, %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
addl %eax, %r14d
addl 40(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 44(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
addl %r12d, %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
# rnd_all_4: 3-6 (rounds 60-63)
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
addl %eax, %r12d
addl 48(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
addl %r10d, %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
addl %eax, %r10d
addl 56(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
addl %r8d, %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
addl %eax, %r8d
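# End of the 64 rounds: fold the new state into the old (Davies-Meyer
# feedback), advance the data pointer, and count down the remaining length
# at 64(%rsp); the subl leaves ZF for the jnz that closes the block loop.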
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
addq $0x40, %rsi
subl $0x40, 64(%rsp)
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
jnz L_sha256_len_avx1_len_rorx_start
xorq %rax, %rax
vzeroupper
addq $0x44, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1_RORX_Len,.-Transform_Sha256_AVX1_RORX_Len
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
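# SHA-256 round constants K[0..63] (FIPS 180-4), addressed via %rdx in
# Transform_Sha256_AVX1_Sha below (via %rax in the _Len variant).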
L_avx1_sha256_sha_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0xfc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x6ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
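# vpshufb mask that byte-swaps each 32-bit word, converting the big-endian
# message words to host (little-endian) order.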
L_avx1_sha256_shuf_mask:
.quad 0x405060700010203, 0xc0d0e0f08090a0b
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1_Sha
.type Transform_Sha256_AVX1_Sha,@function
.align 16
Transform_Sha256_AVX1_Sha:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1_Sha
.p2align 4
_Transform_Sha256_AVX1_Sha:
#endif /* __APPLE__ */
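# Transform_Sha256_AVX1_Sha uses the Intel SHA extensions: sha256rnds2
# performs two rounds on the state held as ABEF/CDGH in %xmm1/%xmm2 (the
# vpshufd $27 reversals produce that lane order), taking W+K from the
# implicit %xmm0 operand; vpshufd $14 then drops the next W+K pair into the
# low qword. sha256msg1/sha256msg2 compute the sigma0/sigma1 halves of the
# message schedule.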
leaq L_avx1_sha256_sha_k(%rip), %rdx
vmovdqa L_avx1_sha256_shuf_mask(%rip), %xmm10
vmovq (%rdi), %xmm1
vmovq 8(%rdi), %xmm2
vmovhpd 16(%rdi), %xmm1, %xmm1
vmovhpd 24(%rdi), %xmm2, %xmm2
vpshufd $27, %xmm1, %xmm1
vpshufd $27, %xmm2, %xmm2
vmovdqu (%rsi), %xmm3
vmovdqu 16(%rsi), %xmm4
vmovdqu 32(%rsi), %xmm5
vmovdqu 48(%rsi), %xmm6
vpshufb %xmm10, %xmm3, %xmm3
vmovdqa %xmm1, %xmm8
vmovdqa %xmm2, %xmm9
# Rounds: 0-3
vpaddd (%rdx), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
# Rounds: 4-7
vpshufb %xmm10, %xmm4, %xmm4
vpaddd 16(%rdx), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 8-11
vpshufb %xmm10, %xmm5, %xmm5
vpaddd 32(%rdx), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 12-15
vpshufb %xmm10, %xmm6, %xmm6
vpaddd 48(%rdx), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 16-19
vpaddd 64(%rdx), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 20-23
vpaddd 80(%rdx), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 24-27
vpaddd 96(%rdx), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 28-31
vpaddd 112(%rdx), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 32-35
vpaddd 128(%rdx), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 36-39
vpaddd 144(%rdx), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 40-43
vpaddd 160(%rdx), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 44-47
vpaddd 176(%rdx), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 48-51
vpaddd 192(%rdx), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 52-63
vpaddd 208(%rdx), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
vpaddd 224(%rdx), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
vpaddd 240(%rdx), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
vpaddd %xmm8, %xmm1, %xmm1
vpaddd %xmm9, %xmm2, %xmm2
vpshufd $27, %xmm1, %xmm1
vpshufd $27, %xmm2, %xmm2
vmovq %xmm1, (%rdi)
vmovq %xmm2, 8(%rdi)
vmovhpd %xmm1, 16(%rdi)
vmovhpd %xmm2, 24(%rdi)
xorq %rax, %rax
vzeroupper
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1_Sha,.-Transform_Sha256_AVX1_Sha
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX1_Sha_Len
.type Transform_Sha256_AVX1_Sha_Len,@function
.align 16
Transform_Sha256_AVX1_Sha_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX1_Sha_Len
.p2align 4
_Transform_Sha256_AVX1_Sha_Len:
#endif /* __APPLE__ */
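# Multi-block SHA-NI variant: %rdx holds the byte count and is counted down
# by 0x40 per block; the state stays in %xmm1/%xmm2 across iterations and is
# only reordered and stored back to (%rdi) after the final block.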
leaq L_avx1_sha256_sha_k(%rip), %rax
vmovdqa L_avx1_sha256_shuf_mask(%rip), %xmm10
vmovq (%rdi), %xmm1
vmovq 8(%rdi), %xmm2
vmovhpd 16(%rdi), %xmm1, %xmm1
vmovhpd 24(%rdi), %xmm2, %xmm2
vpshufd $27, %xmm1, %xmm1
vpshufd $27, %xmm2, %xmm2
# Start of loop processing a block
L_sha256_sha_len_avx1_start:
vmovdqu (%rsi), %xmm3
vmovdqu 16(%rsi), %xmm4
vmovdqu 32(%rsi), %xmm5
vmovdqu 48(%rsi), %xmm6
vpshufb %xmm10, %xmm3, %xmm3
vmovdqa %xmm1, %xmm8
vmovdqa %xmm2, %xmm9
# Rounds: 0-3
vpaddd (%rax), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
# Rounds: 4-7
vpshufb %xmm10, %xmm4, %xmm4
vpaddd 16(%rax), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 8-11
vpshufb %xmm10, %xmm5, %xmm5
vpaddd 32(%rax), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 12-15
vpshufb %xmm10, %xmm6, %xmm6
vpaddd 48(%rax), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 16-19
vpaddd 64(%rax), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 20-23
vpaddd 80(%rax), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 24-27
vpaddd 96(%rax), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 28-31
vpaddd 112(%rax), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 32-35
vpaddd 128(%rax), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 36-39
vpaddd 144(%rax), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm4, %xmm3
sha256rnds2 %xmm2, %xmm1
# Rounds: 40-43
vpaddd 160(%rax), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm5, %xmm4
sha256rnds2 %xmm2, %xmm1
# Rounds: 44-47
vpaddd 176(%rax), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm5, %xmm6, %xmm7
vpaddd %xmm7, %xmm3, %xmm3
sha256msg2 %xmm6, %xmm3
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm6, %xmm5
sha256rnds2 %xmm2, %xmm1
# Rounds: 48-51
vpaddd 192(%rax), %xmm3, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm6, %xmm3, %xmm7
vpaddd %xmm7, %xmm4, %xmm4
sha256msg2 %xmm3, %xmm4
vpshufd $14, %xmm0, %xmm0
sha256msg1 %xmm3, %xmm6
sha256rnds2 %xmm2, %xmm1
# Rounds: 52-63
vpaddd 208(%rax), %xmm4, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm3, %xmm4, %xmm7
vpaddd %xmm7, %xmm5, %xmm5
sha256msg2 %xmm4, %xmm5
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
vpaddd 224(%rax), %xmm5, %xmm0
sha256rnds2 %xmm1, %xmm2
vpalignr $4, %xmm4, %xmm5, %xmm7
vpaddd %xmm7, %xmm6, %xmm6
sha256msg2 %xmm5, %xmm6
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
vpaddd 240(%rax), %xmm6, %xmm0
sha256rnds2 %xmm1, %xmm2
vpshufd $14, %xmm0, %xmm0
sha256rnds2 %xmm2, %xmm1
addq $0x40, %rsi
subl $0x40, %edx
vpaddd %xmm8, %xmm1, %xmm1
vpaddd %xmm9, %xmm2, %xmm2
jnz L_sha256_sha_len_avx1_start
vpshufd $27, %xmm1, %xmm1
vpshufd $27, %xmm2, %xmm2
vmovq %xmm1, (%rdi)
vmovq %xmm2, 8(%rdi)
vmovhpd %xmm1, 16(%rdi)
vmovhpd %xmm2, 24(%rdi)
xorq %rax, %rax
vzeroupper
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX1_Sha_Len,.-Transform_Sha256_AVX1_Sha_Len
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
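# AVX2 K table: each group of four constants is written twice so that both
# 128-bit lanes of a 256-bit vpaddd see the same K values, whether one or
# two blocks' worth of message words occupy the lanes.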
L_avx2_sha256_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0xfc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0xfc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x6ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x6ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
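# shuf_00BA / shuf_DC00: vpshufb masks that place the four freshly scheduled
# words into the low half (00BA) or high half (DC00) of a register; the
# 0xff bytes select zero.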
L_avx2_sha256_shuf_00BA:
.quad 0xb0a090803020100, 0xffffffffffffffff
.quad 0xb0a090803020100, 0xffffffffffffffff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_avx2_sha256_shuf_DC00:
.quad 0xffffffffffffffff, 0xb0a090803020100
.quad 0xffffffffffffffff, 0xb0a090803020100
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
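# Endian-flip mask for vpshufb: reverses the bytes within each 32-bit word,
# converting the big-endian message words of the input block to host order.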
L_avx2_sha256_flip_mask:
.quad 0x405060700010203, 0xc0d0e0f08090a0b
.quad 0x405060700010203, 0xc0d0e0f08090a0b
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX2
.type Transform_Sha256_AVX2,@function
.align 16
Transform_Sha256_AVX2:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX2
.p2align 4
_Transform_Sha256_AVX2:
#endif /* __APPLE__ */
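# System V AMD64 arguments: rdi = SHA-256 state (eight 32-bit words at
# offset 0), rsi = pointer to one 64-byte message block.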
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
subq $0x200, %rsp
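# 0x200 bytes of stack hold the sixteen 32-byte W+K spills for 64 rounds.
# Only the low 128-bit lane of each slot is consumed in this single-block
# path, so the scalar rounds read offsets 0-12, 32-44, 64-76, and so on.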
leaq L_avx2_sha256_k(%rip), %rbp
vmovdqa L_avx2_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx2_sha256_shuf_00BA(%rip), %ymm11
vmovdqa L_avx2_sha256_shuf_DC00(%rip), %ymm12
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
movl %r9d, %ebx
movl %r12d, %edx
xorl %r10d, %ebx
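# eax/ebx alternate carrying (a ^ b), which equals (b ^ c) of the next
# round, so Maj(a,b,c) = ((a ^ b) & (b ^ c)) ^ b costs one AND and one XOR
# per round; the rorl 14/5/6 chain on edx evaluates Sigma1(e) and the
# rorl 9/11/2 chain on ecx evaluates Sigma0(a) without extra temporaries.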
# set_w_k_xfer_4: 0
vpaddd (%rbp), %ymm0, %ymm4
vpaddd 32(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, (%rsp)
vmovdqu %ymm5, 32(%rsp)
vpaddd 64(%rbp), %ymm2, %ymm4
vpaddd 96(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 64(%rsp)
vmovdqu %ymm5, 96(%rsp)
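# set_w_k_xfer_4: add the round constants to four schedule vectors at once
# and spill the W+K sums to the stack so each scalar round below needs only
# a single addl from memory.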
# msg_sched: 0-3
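# Four scalar rounds are interleaved with the vector schedule for the next
# four message words: sigma0 = ROTR7 ^ ROTR18 ^ SHR3 is assembled from the
# vpsrld/vpslld/vpor pairs, and sigma1 = ROTR17 ^ ROTR19 ^ SHR10 from the
# vpsrlq/vpsrld steps, compacted through the shuf_00BA/shuf_DC00 masks.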
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 0-3
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 32(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 36(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 40(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 44(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 8-11
# msg_sched: 16-19
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 64(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 68(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 72(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 76(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 16-19
# msg_sched: 24-27
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 96(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 100(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 104(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 108(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 24-27
# set_w_k_xfer_4: 4
vpaddd 128(%rbp), %ymm0, %ymm4
vpaddd 160(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 128(%rsp)
vmovdqu %ymm5, 160(%rsp)
vpaddd 192(%rbp), %ymm2, %ymm4
vpaddd 224(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 192(%rsp)
vmovdqu %ymm5, 224(%rsp)
# msg_sched: 32-35
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 128(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 132(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 136(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 140(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 32-35
# msg_sched: 40-43
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 160(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 164(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 168(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 172(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 40-43
# msg_sched: 48-51
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 192(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 196(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 200(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 204(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 48-51
# msg_sched: 56-59
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 224(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 228(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 232(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 236(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 56-59
# set_w_k_xfer_4: 8
vpaddd 256(%rbp), %ymm0, %ymm4
vpaddd 288(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 256(%rsp)
vmovdqu %ymm5, 288(%rsp)
vpaddd 320(%rbp), %ymm2, %ymm4
vpaddd 352(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 320(%rsp)
vmovdqu %ymm5, 352(%rsp)
# msg_sched: 64-67
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 256(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 260(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 264(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 268(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 64-67
# msg_sched: 72-75
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 288(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 292(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 296(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 300(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 72-75
# msg_sched: 80-83
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 320(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 324(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 328(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 332(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 80-83
# msg_sched: 88-91
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 352(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 356(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 360(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 364(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 88-91
# set_w_k_xfer_4: 12
vpaddd 384(%rbp), %ymm0, %ymm4
vpaddd 416(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 384(%rsp)
vmovdqu %ymm5, 416(%rsp)
vpaddd 448(%rbp), %ymm2, %ymm4
vpaddd 480(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 448(%rsp)
vmovdqu %ymm5, 480(%rsp)
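# All 64 W+K values are now on the stack; the remaining rounds need no
# message scheduling, so they run as plain scalar round groups.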
# rnd_all_4: 24-27
addl 384(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 388(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 392(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 396(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 26-29
addl 416(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 420(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 424(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 428(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 28-31
addl 448(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 452(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 456(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 460(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 30-33
addl 480(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 484(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 488(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 492(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
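# Davies-Meyer feed-forward: add the working variables back into the state.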
addl %r8d, (%rdi)
addl %r9d, 4(%rdi)
addl %r10d, 8(%rdi)
addl %r11d, 12(%rdi)
addl %r12d, 16(%rdi)
addl %r13d, 20(%rdi)
addl %r14d, 24(%rdi)
addl %r15d, 28(%rdi)
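# Return 0; vzeroupper clears the upper ymm halves to avoid AVX/SSE
# transition penalties before restoring the callee-saved registers.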
xorq %rax, %rax
vzeroupper
addq $0x200, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX2,.-Transform_Sha256_AVX2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX2_Len
.type Transform_Sha256_AVX2_Len,@function
.align 16
Transform_Sha256_AVX2_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX2_Len
.p2align 4
_Transform_Sha256_AVX2_Len:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %rbp
subq $0x204, %rsp
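# rbp holds the byte length (saved at 512(%rsp)); if it contains an odd
# number of 64-byte blocks (bit 6 set), one block is hashed up front so the
# main loop can consume blocks in pairs.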
testb $0x40, %bpl
movl %ebp, 512(%rsp)
je L_sha256_len_avx2_block
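# Odd leading block: copy it into the 64-byte area at 32(%rdi) and hash it
# with the single-block transform, then continue with the paired loop.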
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovups %ymm0, 32(%rdi)
vmovups %ymm1, 64(%rdi)
#ifndef __APPLE__
call Transform_Sha256_AVX2@plt
#else
call _Transform_Sha256_AVX2
#endif /* __APPLE__ */
addq $0x40, %rsi
subl $0x40, 512(%rsp)
jz L_sha256_len_avx2_done
L_sha256_len_avx2_block:
leaq L_avx2_sha256_k(%rip), %rbp
vmovdqa L_avx2_sha256_flip_mask(%rip), %ymm13
vmovdqa L_avx2_sha256_shuf_00BA(%rip), %ymm11
vmovdqa L_avx2_sha256_shuf_DC00(%rip), %ymm12
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
# Start of loop processing two blocks
L_sha256_len_avx2_start:
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vmovdqu 64(%rsi), %xmm4
vmovdqu 80(%rsi), %xmm5
vinserti128 $0x01, %xmm4, %ymm0, %ymm0
vinserti128 $0x01, %xmm5, %ymm1, %ymm1
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vmovdqu 96(%rsi), %xmm6
vmovdqu 112(%rsi), %xmm7
vinserti128 $0x01, %xmm6, %ymm2, %ymm2
vinserti128 $0x01, %xmm7, %ymm3, %ymm3
vpshufb %ymm13, %ymm2, %ymm2
vpshufb %ymm13, %ymm3, %ymm3
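# Block i sits in the low 128-bit lane and block i+1 in the high lane of
# ymm0..ymm3, so every vector step below schedules both blocks at once.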
movl %r9d, %ebx
movl %r12d, %edx
xorl %r10d, %ebx
# set_w_k_xfer_4: 0
vpaddd (%rbp), %ymm0, %ymm4
vpaddd 32(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, (%rsp)
vmovdqu %ymm5, 32(%rsp)
vpaddd 64(%rbp), %ymm2, %ymm4
vpaddd 96(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 64(%rsp)
vmovdqu %ymm5, 96(%rsp)
# msg_sched: 0-3
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl (%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 4(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 8(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 12(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 0-3
# msg_sched: 8-11
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 32(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 36(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 40(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 44(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 8-11
# msg_sched: 16-19
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 64(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 68(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 72(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 76(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 16-19
# msg_sched: 24-27
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 96(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 100(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 104(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 108(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 24-27
# set_w_k_xfer_4: 4
vpaddd 128(%rbp), %ymm0, %ymm4
vpaddd 160(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 128(%rsp)
vmovdqu %ymm5, 160(%rsp)
vpaddd 192(%rbp), %ymm2, %ymm4
vpaddd 224(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 192(%rsp)
vmovdqu %ymm5, 224(%rsp)
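# The set_w_k_xfer_4 block above, with %rbp pointing at
# L_avx2_rorx_sha256_k (set up before this excerpt), adds the round
# constants K[16..31] to the freshly scheduled W words and spills
# W[t]+K[t] to the stack; each scalar round then consumes its value with a
# single addl N(%rsp).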
# msg_sched: 32-35
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 128(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 132(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 136(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 140(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 32-35
# msg_sched: 40-43
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 160(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 164(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 168(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 172(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 40-43
# msg_sched: 48-51
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 192(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 196(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 200(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 204(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 48-51
# msg_sched: 56-59
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 224(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 228(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 232(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 236(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 56-59
# set_w_k_xfer_4: 8
vpaddd 256(%rbp), %ymm0, %ymm4
vpaddd 288(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 256(%rsp)
vmovdqu %ymm5, 288(%rsp)
vpaddd 320(%rbp), %ymm2, %ymm4
vpaddd 352(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 320(%rsp)
vmovdqu %ymm5, 352(%rsp)
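# Same W+K precompute as above, now for rounds 32-47 (K table rows at
# 256..352 off %rbp).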
# msg_sched: 64-67
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm0, %ymm1, %ymm5
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 256(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 260(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm3, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 264(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 268(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# msg_sched done: 64-67
# msg_sched: 72-75
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm1, %ymm2, %ymm5
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 288(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 292(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm0, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 296(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 300(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# msg_sched done: 72-75
# msg_sched: 80-83
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm2, %ymm3, %ymm5
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 1 - 2
movl %r9d, %eax
movl %r13d, %ecx
addl 320(%rsp), %r15d
xorl %r14d, %ecx
xorl %r12d, %edx
andl %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r14d, %ecx
xorl %r12d, %edx
addl %ecx, %r15d
rorl $6, %edx
xorl %r8d, %eax
addl %edx, %r15d
movl %r8d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r8d, %ebx
movl %r12d, %ecx
addl 324(%rsp), %r14d
xorl %r13d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r11d, %edx
andl %r11d, %ecx
rorl $5, %edx
xorl %r13d, %ecx
xorl %r11d, %edx
addl %ecx, %r14d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm1, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r15d, %ebx
addl %edx, %r14d
movl %r15d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r15d, %ecx
xorl %r8d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r15d, %eax
movl %r11d, %ecx
addl 328(%rsp), %r13d
xorl %r12d, %ecx
xorl %r10d, %edx
andl %r10d, %ecx
rorl $5, %edx
xorl %r12d, %ecx
xorl %r10d, %edx
addl %ecx, %r13d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r14d, %eax
addl %edx, %r13d
movl %r14d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r14d, %ecx
xorl %r15d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r14d, %ebx
movl %r10d, %ecx
addl 332(%rsp), %r12d
xorl %r11d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r9d, %edx
andl %r9d, %ecx
rorl $5, %edx
xorl %r11d, %ecx
xorl %r9d, %edx
addl %ecx, %r12d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r13d, %ebx
addl %edx, %r12d
movl %r13d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r13d, %ecx
xorl %r14d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# msg_sched done: 80-83
# msg_sched: 88-91
# rnd_0: 0 - 0
rorl $14, %edx
vpalignr $4, %ymm3, %ymm0, %ymm5
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 1 - 2
movl %r13d, %eax
movl %r9d, %ecx
addl 352(%rsp), %r11d
xorl %r10d, %ecx
xorl %r8d, %edx
andl %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
vpslld $25, %ymm5, %ymm7
# rnd_0: 3 - 4
rorl $5, %edx
xorl %r10d, %ecx
xorl %r8d, %edx
addl %ecx, %r11d
rorl $6, %edx
xorl %r12d, %eax
addl %edx, %r11d
movl %r12d, %ecx
vpsrld $18, %ymm5, %ymm8
vpslld $14, %ymm5, %ymm9
# rnd_0: 5 - 6
andl %eax, %ebx
rorl $9, %ecx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
vpor %ymm6, %ymm7, %ymm6
vpor %ymm8, %ymm9, %ymm8
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
# rnd_1: 0 - 1
rorl $14, %edx
movl %r12d, %ebx
movl %r8d, %ecx
addl 356(%rsp), %r10d
xorl %r9d, %ecx
vpsrld $3, %ymm5, %ymm9
vpxor %ymm6, %ymm8, %ymm6
# rnd_1: 2 - 3
xorl %r15d, %edx
andl %r15d, %ecx
rorl $5, %edx
xorl %r9d, %ecx
xorl %r15d, %edx
addl %ecx, %r10d
vpxor %ymm6, %ymm9, %ymm5
vpshufd $0xfa, %ymm2, %ymm6
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r11d, %ebx
addl %edx, %r10d
movl %r11d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r11d, %ecx
xorl %r12d, %eax
vpsrld $10, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 6 - 7
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
# rnd_0: 0 - 0
rorl $14, %edx
vpsrlq $0x11, %ymm6, %ymm6
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 1 - 3
movl %r11d, %eax
movl %r15d, %ecx
addl 360(%rsp), %r9d
xorl %r8d, %ecx
xorl %r14d, %edx
andl %r14d, %ecx
rorl $5, %edx
xorl %r8d, %ecx
xorl %r14d, %edx
addl %ecx, %r9d
vpxor %ymm6, %ymm7, %ymm6
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 4 - 4
rorl $6, %edx
xorl %r10d, %eax
addl %edx, %r9d
movl %r10d, %ecx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 5 - 5
andl %eax, %ebx
rorl $9, %ecx
xorl %r10d, %ecx
xorl %r11d, %ebx
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 6 - 6
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 7 - 7
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
# rnd_1: 0 - 0
rorl $14, %edx
vpshufd $0x50, %ymm4, %ymm6
# rnd_1: 1 - 1
movl %r10d, %ebx
movl %r14d, %ecx
addl 364(%rsp), %r8d
xorl %r15d, %ecx
vpsrlq $0x11, %ymm6, %ymm8
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 2 - 3
xorl %r13d, %edx
andl %r13d, %ecx
rorl $5, %edx
xorl %r15d, %ecx
xorl %r13d, %edx
addl %ecx, %r8d
vpsrld $10, %ymm6, %ymm9
vpxor %ymm8, %ymm7, %ymm8
# rnd_1: 4 - 5
rorl $6, %edx
xorl %r9d, %ebx
addl %edx, %r8d
movl %r9d, %ecx
andl %ebx, %eax
rorl $9, %ecx
xorl %r9d, %ecx
xorl %r10d, %eax
vpxor %ymm9, %ymm8, %ymm9
# rnd_1: 6 - 6
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 7 - 7
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# msg_sched done: 88-91
# set_w_k_xfer_4: 12
vpaddd 384(%rbp), %ymm0, %ymm4
vpaddd 416(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, 384(%rsp)
vmovdqu %ymm5, 416(%rsp)
vpaddd 448(%rbp), %ymm2, %ymm4
vpaddd 480(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 448(%rsp)
vmovdqu %ymm5, 480(%rsp)
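# Message expansion is finished: all 64 W[t]+K[t] values for both
# interleaved blocks are on the stack, so the remaining rounds run as plain
# rnd_all_4 groups with no vector work left to hide. Per round:
# Sigma1(e) = ((((e ror 14) ^ e) ror 5) ^ e) ror 6 (net rotations 25/11/6),
# Sigma0(a) the same way via ror 9/11/2 (net 22/13/2),
# Ch(e,f,g) = ((f ^ g) & e) ^ g, and Maj(a,b,c) reuses the a^b value saved
# in %eax/%ebx from the previous round (equal to this round's b^c).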
# rnd_all_4: 24-27
addl 384(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 388(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 392(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 396(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 26-29
addl 416(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 420(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 424(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 428(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 28-31
addl 448(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 452(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 456(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 460(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 30-33
addl 480(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 484(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 488(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 492(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
movl %r9d, %ebx
movl %r12d, %edx
xorl %r10d, %ebx
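# First block done: %r8d-%r15d (the working variables a-h) were folded back
# into the hash state at (%rdi) above, and %ebx/%edx are re-seeded for a
# fresh Maj/Sigma1 chain. The rnd_all_4 groups below repeat all 64 rounds
# for the second interleaved block, reading the high-lane halves of the
# stacked W+K rows (offsets 16..28 mod 32).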
# rnd_all_4: 1-4
addl 16(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 20(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 24(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 28(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 3-6
addl 48(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 56(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 5-8
addl 80(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 84(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 88(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 92(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 7-10
addl 112(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 116(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 120(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 124(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 9-12
addl 144(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 148(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 152(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 156(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 11-14
addl 176(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 180(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 184(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 188(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 13-16
addl 208(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 212(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 216(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 220(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 15-18
addl 240(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 244(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 248(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 252(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 17-20
addl 272(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 276(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 280(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 284(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 19-22
addl 304(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 308(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 312(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 316(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 21-24
addl 336(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 340(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 344(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 348(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 23-26
addl 368(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 372(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 376(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 380(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 25-28
addl 400(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 404(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 408(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 412(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 27-30
addl 432(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 436(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 440(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 444(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
# rnd_all_4: 29-32
addl 464(%rsp), %r15d
movl %r13d, %ecx
movl %r9d, %eax
xorl %r14d, %ecx
rorl $14, %edx
andl %r12d, %ecx
xorl %r12d, %edx
xorl %r14d, %ecx
rorl $5, %edx
addl %ecx, %r15d
xorl %r12d, %edx
xorl %r8d, %eax
rorl $6, %edx
movl %r8d, %ecx
addl %edx, %r15d
rorl $9, %ecx
andl %eax, %ebx
xorl %r8d, %ecx
xorl %r9d, %ebx
rorl $11, %ecx
addl %r15d, %r11d
xorl %r8d, %ecx
addl %ebx, %r15d
rorl $2, %ecx
movl %r11d, %edx
addl %ecx, %r15d
addl 468(%rsp), %r14d
movl %r12d, %ecx
movl %r8d, %ebx
xorl %r13d, %ecx
rorl $14, %edx
andl %r11d, %ecx
xorl %r11d, %edx
xorl %r13d, %ecx
rorl $5, %edx
addl %ecx, %r14d
xorl %r11d, %edx
xorl %r15d, %ebx
rorl $6, %edx
movl %r15d, %ecx
addl %edx, %r14d
rorl $9, %ecx
andl %ebx, %eax
xorl %r15d, %ecx
xorl %r8d, %eax
rorl $11, %ecx
addl %r14d, %r10d
xorl %r15d, %ecx
addl %eax, %r14d
rorl $2, %ecx
movl %r10d, %edx
addl %ecx, %r14d
addl 472(%rsp), %r13d
movl %r11d, %ecx
movl %r15d, %eax
xorl %r12d, %ecx
rorl $14, %edx
andl %r10d, %ecx
xorl %r10d, %edx
xorl %r12d, %ecx
rorl $5, %edx
addl %ecx, %r13d
xorl %r10d, %edx
xorl %r14d, %eax
rorl $6, %edx
movl %r14d, %ecx
addl %edx, %r13d
rorl $9, %ecx
andl %eax, %ebx
xorl %r14d, %ecx
xorl %r15d, %ebx
rorl $11, %ecx
addl %r13d, %r9d
xorl %r14d, %ecx
addl %ebx, %r13d
rorl $2, %ecx
movl %r9d, %edx
addl %ecx, %r13d
addl 476(%rsp), %r12d
movl %r10d, %ecx
movl %r14d, %ebx
xorl %r11d, %ecx
rorl $14, %edx
andl %r9d, %ecx
xorl %r9d, %edx
xorl %r11d, %ecx
rorl $5, %edx
addl %ecx, %r12d
xorl %r9d, %edx
xorl %r13d, %ebx
rorl $6, %edx
movl %r13d, %ecx
addl %edx, %r12d
rorl $9, %ecx
andl %ebx, %eax
xorl %r13d, %ecx
xorl %r14d, %eax
rorl $11, %ecx
addl %r12d, %r8d
xorl %r13d, %ecx
addl %eax, %r12d
rorl $2, %ecx
movl %r8d, %edx
addl %ecx, %r12d
# rnd_all_4: 31-34
addl 496(%rsp), %r11d
movl %r9d, %ecx
movl %r13d, %eax
xorl %r10d, %ecx
rorl $14, %edx
andl %r8d, %ecx
xorl %r8d, %edx
xorl %r10d, %ecx
rorl $5, %edx
addl %ecx, %r11d
xorl %r8d, %edx
xorl %r12d, %eax
rorl $6, %edx
movl %r12d, %ecx
addl %edx, %r11d
rorl $9, %ecx
andl %eax, %ebx
xorl %r12d, %ecx
xorl %r13d, %ebx
rorl $11, %ecx
addl %r11d, %r15d
xorl %r12d, %ecx
addl %ebx, %r11d
rorl $2, %ecx
movl %r15d, %edx
addl %ecx, %r11d
addl 500(%rsp), %r10d
movl %r8d, %ecx
movl %r12d, %ebx
xorl %r9d, %ecx
rorl $14, %edx
andl %r15d, %ecx
xorl %r15d, %edx
xorl %r9d, %ecx
rorl $5, %edx
addl %ecx, %r10d
xorl %r15d, %edx
xorl %r11d, %ebx
rorl $6, %edx
movl %r11d, %ecx
addl %edx, %r10d
rorl $9, %ecx
andl %ebx, %eax
xorl %r11d, %ecx
xorl %r12d, %eax
rorl $11, %ecx
addl %r10d, %r14d
xorl %r11d, %ecx
addl %eax, %r10d
rorl $2, %ecx
movl %r14d, %edx
addl %ecx, %r10d
addl 504(%rsp), %r9d
movl %r15d, %ecx
movl %r11d, %eax
xorl %r8d, %ecx
rorl $14, %edx
andl %r14d, %ecx
xorl %r14d, %edx
xorl %r8d, %ecx
rorl $5, %edx
addl %ecx, %r9d
xorl %r14d, %edx
xorl %r10d, %eax
rorl $6, %edx
movl %r10d, %ecx
addl %edx, %r9d
rorl $9, %ecx
andl %eax, %ebx
xorl %r10d, %ecx
xorl %r11d, %ebx
rorl $11, %ecx
addl %r9d, %r13d
xorl %r10d, %ecx
addl %ebx, %r9d
rorl $2, %ecx
movl %r13d, %edx
addl %ecx, %r9d
addl 508(%rsp), %r8d
movl %r14d, %ecx
movl %r10d, %ebx
xorl %r15d, %ecx
rorl $14, %edx
andl %r13d, %ecx
xorl %r13d, %edx
xorl %r15d, %ecx
rorl $5, %edx
addl %ecx, %r8d
xorl %r13d, %edx
xorl %r9d, %ebx
rorl $6, %edx
movl %r9d, %ecx
addl %edx, %r8d
rorl $9, %ecx
andl %ebx, %eax
xorl %r9d, %ecx
xorl %r10d, %eax
rorl $11, %ecx
addl %r8d, %r12d
xorl %r9d, %ecx
addl %eax, %r8d
rorl $2, %ecx
movl %r12d, %edx
addl %ecx, %r8d
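# Compression done for this block pair: fold the working variables back
# into the hash state at (%rdi) (h[i] += working[i]).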
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
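# Two 64-byte blocks consumed: advance the data pointer and the
# remaining-length counter kept at 512(%rsp).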
addq $0x80, %rsi
subl $0x80, 512(%rsp)
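# Store the updated state; the movl stores leave ZF from the subl above
# intact, so the jnz below loops while bytes remain.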
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
jnz L_sha256_len_avx2_start
L_sha256_len_avx2_done:
xorq %rax, %rax
vzeroupper
addq $0x204, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
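# rep ret: two-byte return used to avoid a branch-prediction penalty on
# older AMD cores.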
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX2_Len,.-Transform_Sha256_AVX2_Len
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
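# SHA-256 round constants K[0..63]; each group of four is stored twice so
# a single 32-byte load supplies the same constants to both 128-bit lanes
# (one lane per interleaved block).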
L_avx2_rorx_sha256_k:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0xfc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0xfc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x6ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x6ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
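# vpshufb mask that byte-swaps each 32-bit word, converting little-endian
# input bytes to the big-endian word order SHA-256 operates on.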
L_avx2_rorx_sha256_flip_mask:
.quad 0x405060700010203, 0xc0d0e0f08090a0b
.quad 0x405060700010203, 0xc0d0e0f08090a0b
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
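# vpshufb mask for the message schedule: appears to pack the two 32-bit
# sigma1 results (held in 64-bit lanes) into the low dwords, with the
# 0xff bytes zeroing the rest.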
L_avx2_rorx_sha256_shuf_00BA:
.quad 0xb0a090803020100, 0xffffffffffffffff
.quad 0xb0a090803020100, 0xffffffffffffffff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
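# Companion mask: appears to place the other pair of sigma1 results in
# the high dwords, so combining the two shuffled halves yields four new
# schedule words.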
L_avx2_rorx_sha256_shuf_DC00:
.quad 0xffffffffffffffff, 0xb0a090803020100
.quad 0xffffffffffffffff, 0xb0a090803020100
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX2_RORX
.type Transform_Sha256_AVX2_RORX,@function
.align 16
Transform_Sha256_AVX2_RORX:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX2_RORX
.p2align 4
_Transform_Sha256_AVX2_RORX:
#endif /* __APPLE__ */
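# Single-block transform: %rdi = hash state (eight 32-bit words at offset
# 0), %rsi = 64-byte input block (System V argument registers; the
# parameter roles are inferred from the code, not taken from a header).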
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
subq $0x200, %rsp
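# 0x200 bytes of scratch for the 64 precomputed W[i]+K[i] values (16
# groups of 32 bytes). This single-block path only ever reads the low 16
# bytes of each group; the high lane is computed but unused.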
leaq L_avx2_rorx_sha256_k(%rip), %rbp
vmovdqa L_avx2_rorx_sha256_flip_mask(%rip), %xmm13
vmovdqa L_avx2_rorx_sha256_shuf_00BA(%rip), %ymm11
vmovdqa L_avx2_rorx_sha256_shuf_DC00(%rip), %ymm12
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vpshufb %xmm13, %xmm0, %xmm0
vpshufb %xmm13, %xmm1, %xmm1
vpaddd (%rbp), %ymm0, %ymm4
vpaddd 32(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, (%rsp)
vmovdqu %ymm5, 32(%rsp)
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vpshufb %xmm13, %xmm2, %xmm2
vpshufb %xmm13, %xmm3, %xmm3
vpaddd 64(%rbp), %ymm2, %ymm4
vpaddd 96(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 64(%rsp)
vmovdqu %ymm5, 96(%rsp)
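# Load the eight state words a..h into %r8d..%r15d.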
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
movl %r9d, %ebx
rorxl $6, %r12d, %edx
xorl %r10d, %ebx
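# The rnd_0/rnd_1 sequences below are single scalar rounds interleaved
# with the vector message-schedule work; rorx computes the Sigma
# rotations without reading or writing flags, so no flag dependencies
# serialize the mix.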
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 128(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 128(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 32(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 36(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 40(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 44(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 160(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 160(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 64(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 68(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 72(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 76(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 192(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 192(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 96(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 100(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 104(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 108(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 224(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 224(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 128(%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 132(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 136(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 140(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 256(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 256(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 160(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 164(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 168(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 172(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 288(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 288(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 192(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 196(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 200(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 204(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 320(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 320(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 224(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 228(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 232(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 236(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 352(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 352(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 256(%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 260(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 264(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 268(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 384(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 384(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 288(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 292(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 296(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 300(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 416(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 416(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 320(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 324(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 328(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 332(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 448(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 448(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 352(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 356(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 360(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 364(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 480(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 480(%rsp)
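# Message schedule complete: the final 16 rounds below consume the stored
# W+K values with no further expansion.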
xorl %eax, %eax
xorl %ecx, %ecx
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 384(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 388(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 392(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 396(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 416(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 420(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 424(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 428(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 448(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 452(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 456(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 460(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 480(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 484(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 488(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 492(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
addl %eax, %r8d
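# Add the final working variables into the hash state in memory.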
addl %r8d, (%rdi)
addl %r9d, 4(%rdi)
addl %r10d, 8(%rdi)
addl %r11d, 12(%rdi)
addl %r12d, 16(%rdi)
addl %r13d, 20(%rdi)
addl %r14d, 24(%rdi)
addl %r15d, 28(%rdi)
xorq %rax, %rax
vzeroupper
addq $0x200, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX2_RORX,.-Transform_Sha256_AVX2_RORX
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl Transform_Sha256_AVX2_RORX_Len
.type Transform_Sha256_AVX2_RORX_Len,@function
.align 16
Transform_Sha256_AVX2_RORX_Len:
#else
.section __TEXT,__text
.globl _Transform_Sha256_AVX2_RORX_Len
.p2align 4
_Transform_Sha256_AVX2_RORX_Len:
#endif /* __APPLE__ */
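# Multi-block variant: %rdx carries the byte count (assumed to be a
# multiple of 64), parked in %rbp and then spilled to 512(%rsp) as the
# loop counter.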
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rdx, %rbp
subq $0x204, %rsp
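# An odd number of 64-byte blocks (bit 6 of the length set) is handled
# first: one block is copied to 32(%rdi), presumably the context's buffer
# field, and hashed by the single-block transform, so the main loop
# always consumes pairs.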
testb $0x40, %bpl
movl %ebp, 512(%rsp)
je L_sha256_len_avx2_rorx_block
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovups %ymm0, 32(%rdi)
vmovups %ymm1, 64(%rdi)
#ifndef __APPLE__
call Transform_Sha256_AVX2_RORX@plt
#else
call _Transform_Sha256_AVX2_RORX
#endif /* __APPLE__ */
addq $0x40, %rsi
subl $0x40, 512(%rsp)
jz L_sha256_len_avx2_rorx_done
L_sha256_len_avx2_rorx_block:
leaq L_avx2_rorx_sha256_k(%rip), %rbp
vmovdqa L_avx2_rorx_sha256_flip_mask(%rip), %ymm13
vmovdqa L_avx2_rorx_sha256_shuf_00BA(%rip), %ymm11
vmovdqa L_avx2_rorx_sha256_shuf_DC00(%rip), %ymm12
movl (%rdi), %r8d
movl 4(%rdi), %r9d
movl 8(%rdi), %r10d
movl 12(%rdi), %r11d
movl 16(%rdi), %r12d
movl 20(%rdi), %r13d
movl 24(%rdi), %r14d
movl 28(%rdi), %r15d
# Start of main loop, processing two 64-byte blocks per iteration
L_sha256_len_avx2_rorx_start:
# X0, X1, X2, X3 = W[0..15]
vmovdqu (%rsi), %xmm0
vmovdqu 16(%rsi), %xmm1
vinserti128 $0x01, 64(%rsi), %ymm0, %ymm0
vinserti128 $0x01, 80(%rsi), %ymm1, %ymm1
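# vinserti128 interleaves the pair: block 0's words fill the low 128-bit
# lane, block 1's (64 bytes further on) the high lane, so each vector op
# advances both message schedules at once.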
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vpaddd (%rbp), %ymm0, %ymm4
vpaddd 32(%rbp), %ymm1, %ymm5
vmovdqu %ymm4, (%rsp)
vmovdqu %ymm5, 32(%rsp)
vmovdqu 32(%rsi), %xmm2
vmovdqu 48(%rsi), %xmm3
vinserti128 $0x01, 96(%rsi), %ymm2, %ymm2
vinserti128 $0x01, 112(%rsi), %ymm3, %ymm3
vpshufb %ymm13, %ymm2, %ymm2
vpshufb %ymm13, %ymm3, %ymm3
vpaddd 64(%rbp), %ymm2, %ymm4
vpaddd 96(%rbp), %ymm3, %ymm5
vmovdqu %ymm4, 64(%rsp)
vmovdqu %ymm5, 96(%rsp)
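# W[0..15]+K for both blocks are now staged on the stack; the rounds
# below interleave scalar compression with schedule expansion for
# W[16..63].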
movl %r9d, %ebx
rorxl $6, %r12d, %edx
xorl %r10d, %ebx
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl (%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 4(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 8(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 12(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 128(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 128(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 32(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 36(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 40(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 44(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
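# ymm1 = the next four schedule words w[t..t+3] for both lanes. sigma1 appears
# to be applied in two halves (vpshufd $0xfa over w[t-2],w[t-1], then vpshufd
# $0x50 over the two freshly formed words) because w[t+2],w[t+3] depend on
# w[t],w[t+1]; ymm11/ymm12 look like byte-shuffle masks that move the 64-bit
# rotate results back into the proper dword lanes.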
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 160(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 160(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 64(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 68(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 72(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 76(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 192(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 192(%rsp)
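# The 16-word schedule window rotates through ymm0-ymm3: each four-round group
# overwrites the oldest register (ymm0, then ymm1, ymm2, ymm3) and spills the
# matching w+k quad to its 32-byte stack slot.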
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 96(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 100(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 104(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 108(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 224(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 224(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 128(%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 132(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 136(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 140(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 256(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 256(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 160(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 164(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 168(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 172(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 288(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 288(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 192(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 196(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 200(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 204(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 320(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 320(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 224(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 228(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 232(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 236(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 352(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 352(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 256(%rsp), %r15d
vpalignr $4, %ymm0, %ymm1, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm2, %ymm3, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 260(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm3, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm0, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 264(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 268(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm0
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 384(%rbp), %ymm0, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 384(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 288(%rsp), %r11d
vpalignr $4, %ymm1, %ymm2, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm3, %ymm0, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 292(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm0, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm1, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 296(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 300(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm1
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 416(%rbp), %ymm1, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 416(%rsp)
# rnd_0: 0 - 0
movl %r13d, %eax
rorxl $11, %r12d, %ecx
addl 320(%rsp), %r15d
vpalignr $4, %ymm2, %ymm3, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
vpalignr $4, %ymm0, %ymm1, %ymm4
# rnd_0: 2 - 2
andl %r12d, %eax
xorl %ecx, %edx
rorxl $13, %r8d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r15d
rorxl $2, %r8d, %edx
xorl %r14d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r8d, %eax
addl %edx, %r15d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
addl %ebx, %r15d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r12d, %ebx
rorxl $11, %r11d, %ecx
addl 324(%rsp), %r14d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r11d, %ebx
xorl %ecx, %edx
rorxl $13, %r15d, %ecx
vpshufd $0xfa, %ymm1, %ymm7
# rnd_1: 3 - 3
addl %edx, %r14d
rorxl $2, %r15d, %edx
xorl %r13d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r14d, %r10d
movl %r8d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r15d, %ebx
addl %edx, %r14d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r8d, %eax
rorxl $6, %r10d, %edx
addl %eax, %r14d
vpaddd %ymm2, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r11d, %eax
rorxl $11, %r10d, %ecx
addl 328(%rsp), %r13d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r10d, %eax
xorl %ecx, %edx
rorxl $13, %r14d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r13d
rorxl $2, %r14d, %edx
xorl %r12d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r14d, %eax
addl %edx, %r13d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
addl %ebx, %r13d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r10d, %ebx
rorxl $11, %r9d, %ecx
addl 332(%rsp), %r12d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r9d, %ebx
xorl %ecx, %edx
rorxl $13, %r13d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r12d
rorxl $2, %r13d, %edx
xorl %r11d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
vpaddd %ymm4, %ymm9, %ymm2
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r12d, %r8d
movl %r14d, %ebx
vpaddd 448(%rbp), %ymm2, %ymm4
# rnd_1: 6 - 6
xorl %r13d, %ebx
addl %edx, %r12d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r14d, %eax
rorxl $6, %r8d, %edx
addl %eax, %r12d
vmovdqu %ymm4, 448(%rsp)
# rnd_0: 0 - 0
movl %r9d, %eax
rorxl $11, %r8d, %ecx
addl 352(%rsp), %r11d
vpalignr $4, %ymm3, %ymm0, %ymm5
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
vpalignr $4, %ymm1, %ymm2, %ymm4
# rnd_0: 2 - 2
andl %r8d, %eax
xorl %ecx, %edx
rorxl $13, %r12d, %ecx
vpsrld $7, %ymm5, %ymm6
# rnd_0: 3 - 3
addl %edx, %r11d
rorxl $2, %r12d, %edx
xorl %r10d, %eax
vpslld $25, %ymm5, %ymm7
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
vpsrld $18, %ymm5, %ymm8
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
vpslld $14, %ymm5, %ymm9
# rnd_0: 6 - 6
xorl %r12d, %eax
addl %edx, %r11d
andl %eax, %ebx
vpor %ymm7, %ymm6, %ymm6
# rnd_0: 7 - 7
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
addl %ebx, %r11d
vpor %ymm9, %ymm8, %ymm8
# rnd_1: 0 - 0
movl %r8d, %ebx
rorxl $11, %r15d, %ecx
addl 356(%rsp), %r10d
vpsrld $3, %ymm5, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
vpxor %ymm8, %ymm6, %ymm6
# rnd_1: 2 - 2
andl %r15d, %ebx
xorl %ecx, %edx
rorxl $13, %r11d, %ecx
vpshufd $0xfa, %ymm2, %ymm7
# rnd_1: 3 - 3
addl %edx, %r10d
rorxl $2, %r11d, %edx
xorl %r9d, %ebx
vpxor %ymm6, %ymm9, %ymm5
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
vpsrld $10, %ymm7, %ymm8
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r10d, %r14d
movl %r12d, %ebx
vpsrlq $19, %ymm7, %ymm6
# rnd_1: 6 - 6
xorl %r11d, %ebx
addl %edx, %r10d
andl %ebx, %eax
vpsrlq $0x11, %ymm7, %ymm7
# rnd_1: 7 - 7
xorl %r12d, %eax
rorxl $6, %r14d, %edx
addl %eax, %r10d
vpaddd %ymm3, %ymm4, %ymm4
# rnd_0: 0 - 0
movl %r15d, %eax
rorxl $11, %r14d, %ecx
addl 360(%rsp), %r9d
vpxor %ymm7, %ymm6, %ymm6
# rnd_0: 1 - 1
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
vpxor %ymm6, %ymm8, %ymm8
# rnd_0: 2 - 2
andl %r14d, %eax
xorl %ecx, %edx
rorxl $13, %r10d, %ecx
vpaddd %ymm5, %ymm4, %ymm4
# rnd_0: 3 - 3
addl %edx, %r9d
rorxl $2, %r10d, %edx
xorl %r8d, %eax
vpshufb %ymm11, %ymm8, %ymm8
# rnd_0: 4 - 4
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
vpaddd %ymm8, %ymm4, %ymm4
# rnd_0: 5 - 5
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
vpshufd $0x50, %ymm4, %ymm6
# rnd_0: 6 - 6
xorl %r10d, %eax
addl %edx, %r9d
andl %eax, %ebx
vpsrlq $0x11, %ymm6, %ymm8
# rnd_0: 7 - 7
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
addl %ebx, %r9d
vpsrlq $19, %ymm6, %ymm7
# rnd_1: 0 - 0
movl %r14d, %ebx
rorxl $11, %r13d, %ecx
addl 364(%rsp), %r8d
vpsrld $10, %ymm6, %ymm9
# rnd_1: 1 - 1
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
vpxor %ymm7, %ymm8, %ymm8
# rnd_1: 2 - 2
andl %r13d, %ebx
xorl %ecx, %edx
rorxl $13, %r9d, %ecx
vpxor %ymm8, %ymm9, %ymm9
# rnd_1: 3 - 3
addl %edx, %r8d
rorxl $2, %r9d, %edx
xorl %r15d, %ebx
vpshufb %ymm12, %ymm9, %ymm9
# rnd_1: 4 - 4
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
vpaddd %ymm4, %ymm9, %ymm3
# rnd_1: 5 - 5
xorl %ecx, %edx
addl %r8d, %r12d
movl %r10d, %ebx
vpaddd 480(%rbp), %ymm3, %ymm4
# rnd_1: 6 - 6
xorl %r9d, %ebx
addl %edx, %r8d
andl %ebx, %eax
# rnd_1: 7 - 7
xorl %r10d, %eax
rorxl $6, %r12d, %edx
addl %eax, %r8d
vmovdqu %ymm4, 480(%rsp)
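# Schedule expansion is complete (w[48..63] spilled at 384..480(%rsp)); the
# last 16 rounds of this block are pure scalar. %eax/%ecx are cleared here,
# apparently because the unrolled round macro folds the previous round's Maj
# term into the leal below, and there is no previous round at this seam.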
xorl %eax, %eax
xorl %ecx, %ecx
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 384(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 388(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 392(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 396(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 416(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 420(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 424(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 428(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 448(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 452(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 456(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 460(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 480(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 484(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 488(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 492(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
addl %eax, %r8d
xorl %ecx, %ecx
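# First block done: fold the working variables back into the hash state that
# %rdi appears to point at (h[i] += working variable), then store the updated
# digest words.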
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
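# Second message block: its w+k values were already expanded into the high
# 128-bit lanes during the pass above, so these 64 rounds are scalar-only,
# reading the odd 16-byte halves of the spilled quads
# (16..28, 48..60, 80..92(%rsp), ...).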
movl %r9d, %ebx
xorl %eax, %eax
xorl %r10d, %ebx
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 16(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 20(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 24(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 28(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 48(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 52(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 56(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 60(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 80(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 84(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 88(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 92(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 112(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 116(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 120(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 124(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 144(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 148(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 152(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 156(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 176(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 180(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 184(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 188(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 208(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 212(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 216(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 220(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 240(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 244(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 248(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 252(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 272(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 276(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 280(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 284(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 304(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 308(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 312(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 316(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 336(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 340(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 344(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 348(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 368(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 372(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 376(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 380(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 400(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 404(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 408(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 412(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 432(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 436(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 440(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 444(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
rorxl $6, %r12d, %edx
rorxl $11, %r12d, %ecx
leal (%r8,%rax,1), %r8d
addl 464(%rsp), %r15d
movl %r13d, %eax
xorl %edx, %ecx
xorl %r14d, %eax
rorxl $25, %r12d, %edx
xorl %ecx, %edx
andl %r12d, %eax
addl %edx, %r15d
rorxl $2, %r8d, %edx
rorxl $13, %r8d, %ecx
xorl %r14d, %eax
xorl %edx, %ecx
rorxl $22, %r8d, %edx
addl %eax, %r15d
xorl %ecx, %edx
movl %r9d, %eax
addl %r15d, %r11d
xorl %r8d, %eax
andl %eax, %ebx
addl %edx, %r15d
xorl %r9d, %ebx
rorxl $6, %r11d, %edx
rorxl $11, %r11d, %ecx
addl %ebx, %r15d
addl 468(%rsp), %r14d
movl %r12d, %ebx
xorl %edx, %ecx
xorl %r13d, %ebx
rorxl $25, %r11d, %edx
xorl %ecx, %edx
andl %r11d, %ebx
addl %edx, %r14d
rorxl $2, %r15d, %edx
rorxl $13, %r15d, %ecx
xorl %r13d, %ebx
xorl %edx, %ecx
rorxl $22, %r15d, %edx
addl %ebx, %r14d
xorl %ecx, %edx
movl %r8d, %ebx
leal (%r10,%r14,1), %r10d
xorl %r15d, %ebx
andl %ebx, %eax
addl %edx, %r14d
xorl %r8d, %eax
rorxl $6, %r10d, %edx
rorxl $11, %r10d, %ecx
leal (%r14,%rax,1), %r14d
addl 472(%rsp), %r13d
movl %r11d, %eax
xorl %edx, %ecx
xorl %r12d, %eax
rorxl $25, %r10d, %edx
xorl %ecx, %edx
andl %r10d, %eax
addl %edx, %r13d
rorxl $2, %r14d, %edx
rorxl $13, %r14d, %ecx
xorl %r12d, %eax
xorl %edx, %ecx
rorxl $22, %r14d, %edx
addl %eax, %r13d
xorl %ecx, %edx
movl %r15d, %eax
addl %r13d, %r9d
xorl %r14d, %eax
andl %eax, %ebx
addl %edx, %r13d
xorl %r15d, %ebx
rorxl $6, %r9d, %edx
rorxl $11, %r9d, %ecx
addl %ebx, %r13d
addl 476(%rsp), %r12d
movl %r10d, %ebx
xorl %edx, %ecx
xorl %r11d, %ebx
rorxl $25, %r9d, %edx
xorl %ecx, %edx
andl %r9d, %ebx
addl %edx, %r12d
rorxl $2, %r13d, %edx
rorxl $13, %r13d, %ecx
xorl %r11d, %ebx
xorl %edx, %ecx
rorxl $22, %r13d, %edx
addl %ebx, %r12d
xorl %ecx, %edx
movl %r14d, %ebx
leal (%r8,%r12,1), %r8d
xorl %r13d, %ebx
andl %ebx, %eax
addl %edx, %r12d
xorl %r14d, %eax
rorxl $6, %r8d, %edx
rorxl $11, %r8d, %ecx
leal (%r12,%rax,1), %r12d
addl 496(%rsp), %r11d
movl %r9d, %eax
xorl %edx, %ecx
xorl %r10d, %eax
rorxl $25, %r8d, %edx
xorl %ecx, %edx
andl %r8d, %eax
addl %edx, %r11d
rorxl $2, %r12d, %edx
rorxl $13, %r12d, %ecx
xorl %r10d, %eax
xorl %edx, %ecx
rorxl $22, %r12d, %edx
addl %eax, %r11d
xorl %ecx, %edx
movl %r13d, %eax
addl %r11d, %r15d
xorl %r12d, %eax
andl %eax, %ebx
addl %edx, %r11d
xorl %r13d, %ebx
rorxl $6, %r15d, %edx
rorxl $11, %r15d, %ecx
addl %ebx, %r11d
addl 500(%rsp), %r10d
movl %r8d, %ebx
xorl %edx, %ecx
xorl %r9d, %ebx
rorxl $25, %r15d, %edx
xorl %ecx, %edx
andl %r15d, %ebx
addl %edx, %r10d
rorxl $2, %r11d, %edx
rorxl $13, %r11d, %ecx
xorl %r9d, %ebx
xorl %edx, %ecx
rorxl $22, %r11d, %edx
addl %ebx, %r10d
xorl %ecx, %edx
movl %r12d, %ebx
leal (%r14,%r10,1), %r14d
xorl %r11d, %ebx
andl %ebx, %eax
addl %edx, %r10d
xorl %r12d, %eax
rorxl $6, %r14d, %edx
rorxl $11, %r14d, %ecx
leal (%r10,%rax,1), %r10d
addl 504(%rsp), %r9d
movl %r15d, %eax
xorl %edx, %ecx
xorl %r8d, %eax
rorxl $25, %r14d, %edx
xorl %ecx, %edx
andl %r14d, %eax
addl %edx, %r9d
rorxl $2, %r10d, %edx
rorxl $13, %r10d, %ecx
xorl %r8d, %eax
xorl %edx, %ecx
rorxl $22, %r10d, %edx
addl %eax, %r9d
xorl %ecx, %edx
movl %r11d, %eax
addl %r9d, %r13d
xorl %r10d, %eax
andl %eax, %ebx
addl %edx, %r9d
xorl %r11d, %ebx
rorxl $6, %r13d, %edx
rorxl $11, %r13d, %ecx
addl %ebx, %r9d
addl 508(%rsp), %r8d
movl %r14d, %ebx
xorl %edx, %ecx
xorl %r15d, %ebx
rorxl $25, %r13d, %edx
xorl %ecx, %edx
andl %r13d, %ebx
addl %edx, %r8d
rorxl $2, %r9d, %edx
rorxl $13, %r9d, %ecx
xorl %r15d, %ebx
xorl %edx, %ecx
rorxl $22, %r9d, %edx
addl %ebx, %r8d
xorl %ecx, %edx
movl %r10d, %ebx
leal (%r12,%r8,1), %r12d
xorl %r9d, %ebx
andl %ebx, %eax
addl %edx, %r8d
xorl %r10d, %eax
addl %eax, %r8d
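/* The 64 rounds for this block pair are done: advance the source pointer
 * by two 64-byte blocks, add the working variables back into the hash
 * state at (%rdi) (the Merkle-Damgard feed-forward), decrement the
 * remaining length kept at 512(%rsp), and loop while it is non-zero. */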
addq $0x80, %rsi
addl (%rdi), %r8d
addl 4(%rdi), %r9d
addl 8(%rdi), %r10d
addl 12(%rdi), %r11d
addl 16(%rdi), %r12d
addl 20(%rdi), %r13d
addl 24(%rdi), %r14d
addl 28(%rdi), %r15d
subl $0x80, 512(%rsp)
movl %r8d, (%rdi)
movl %r9d, 4(%rdi)
movl %r10d, 8(%rdi)
movl %r11d, 12(%rdi)
movl %r12d, 16(%rdi)
movl %r13d, 20(%rdi)
movl %r14d, 24(%rdi)
movl %r15d, 28(%rdi)
jnz L_sha256_len_avx2_rorx_start
L_sha256_len_avx2_rorx_done:
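/* Return 0, clear the upper AVX state to avoid SSE/AVX transition
 * penalties, release the 0x204-byte frame, and restore the callee-saved
 * registers pushed in the prologue. */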
xorq %rax, %rax
vzeroupper
addq $0x204, %rsp
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size Transform_Sha256_AVX2_RORX_Len,.-Transform_Sha256_AVX2_RORX_Len
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* WOLFSSL_X86_64_BUILD */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo_id: aerisarn/mesa-uwp
size: 66,736
file_path: src/util/blake3/blake3_avx2_x86-64_windows_gnu.S
.intel_syntax noprefix
.global _blake3_hash_many_avx2
.global blake3_hash_many_avx2
.section .text
.p2align 6
_blake3_hash_many_avx2:
blake3_hash_many_avx2:
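/* Windows x64 prologue: r12-r15, rsi, rdi, rbx, rbp and xmm6-xmm15 are all
   callee-saved in this ABI, so save them before 64-byte-aligning the stack
   (and rsp, ~63) for the ymm spill slots used below. */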
push r15
push r14
push r13
push r12
push rsi
push rdi
push rbx
push rbp
mov rbp, rsp
sub rsp, 880
and rsp, 0xFFFFFFFFFFFFFFC0
vmovdqa xmmword ptr [rsp+0x2D0], xmm6
vmovdqa xmmword ptr [rsp+0x2E0], xmm7
vmovdqa xmmword ptr [rsp+0x2F0], xmm8
vmovdqa xmmword ptr [rsp+0x300], xmm9
vmovdqa xmmword ptr [rsp+0x310], xmm10
vmovdqa xmmword ptr [rsp+0x320], xmm11
vmovdqa xmmword ptr [rsp+0x330], xmm12
vmovdqa xmmword ptr [rsp+0x340], xmm13
vmovdqa xmmword ptr [rsp+0x350], xmm14
vmovdqa xmmword ptr [rsp+0x360], xmm15
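/* Remap the Microsoft argument registers onto the System V ones the body
   uses: rdi=inputs, rsi=num_inputs, rdx=blocks, rcx=key. The remaining
   parameters (counter, increment_counter, flags, flags_start, flags_end,
   out) are fetched from the caller's stack through rbp below. */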
mov rdi, rcx
mov rsi, rdx
mov rdx, r8
mov rcx, r9
mov r8, qword ptr [rbp+0x68]
movzx r9, byte ptr [rbp+0x70]
neg r9d
vmovd xmm0, r9d
vpbroadcastd ymm0, xmm0
vmovdqa ymmword ptr [rsp+0x260], ymm0
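/* Build the per-lane 64-bit block counters: increment_counter was negated
   into an all-ones/all-zero mask, which gates the lane offsets (ADD0/ADD1).
   Carries from the low halves into the high halves are detected with a
   signed compare after flipping the sign bit (CMP_MSB_MASK) - an
   unsigned-compare emulation, since AVX2 has no unsigned vpcmpgtd. */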
vpand ymm1, ymm0, ymmword ptr [ADD0+rip]
vpand ymm2, ymm0, ymmword ptr [ADD1+rip]
vmovdqa ymmword ptr [rsp+0x2A0], ymm2
vmovd xmm2, r8d
vpbroadcastd ymm2, xmm2
vpaddd ymm2, ymm2, ymm1
vmovdqa ymmword ptr [rsp+0x220], ymm2
vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm1, ymm2
shr r8, 32
vmovd xmm3, r8d
vpbroadcastd ymm3, xmm3
vpsubd ymm3, ymm3, ymm2
vmovdqa ymmword ptr [rsp+0x240], ymm3
shl rdx, 6
mov qword ptr [rsp+0x2C0], rdx
cmp rsi, 8
jc 3f
2:
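/* 8-way wide path: broadcast each of the eight key words into its own ymm
   row and load the eight input pointers into r8-r15. */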
vpbroadcastd ymm0, dword ptr [rcx]
vpbroadcastd ymm1, dword ptr [rcx+0x4]
vpbroadcastd ymm2, dword ptr [rcx+0x8]
vpbroadcastd ymm3, dword ptr [rcx+0xC]
vpbroadcastd ymm4, dword ptr [rcx+0x10]
vpbroadcastd ymm5, dword ptr [rcx+0x14]
vpbroadcastd ymm6, dword ptr [rcx+0x18]
vpbroadcastd ymm7, dword ptr [rcx+0x1C]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
mov r12, qword ptr [rdi+0x20]
mov r13, qword ptr [rdi+0x28]
mov r14, qword ptr [rdi+0x30]
mov r15, qword ptr [rdi+0x38]
movzx eax, byte ptr [rbp+0x78]
movzx ebx, byte ptr [rbp+0x80]
or eax, ebx
xor edx, edx
.p2align 5
9:
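/* Per-block loop: OR flags_end into the block flags only on the final
   block (cmove), then gather 64 bytes from each of the eight inputs and
   transpose them into sixteen message-word row vectors at [rsp..rsp+0x1E0]. */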
movzx ebx, byte ptr [rbp+0x88]
or ebx, eax
add rdx, 64
cmp rdx, qword ptr [rsp+0x2C0]
cmove eax, ebx
mov dword ptr [rsp+0x200], eax
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x40]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x40]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x20], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x40], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x60], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x30]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x30]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x80], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0xA0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0xC0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0xE0], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x20]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x20]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x100], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x120], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x140], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x160], ymm11
vmovups xmm8, xmmword ptr [r8+rdx-0x10]
vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01
vmovups xmm9, xmmword ptr [r9+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01
vunpcklpd ymm12, ymm8, ymm9
vunpckhpd ymm13, ymm8, ymm9
vmovups xmm10, xmmword ptr [r10+rdx-0x10]
vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01
vmovups xmm11, xmmword ptr [r11+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01
vunpcklpd ymm14, ymm10, ymm11
vunpckhpd ymm15, ymm10, ymm11
vshufps ymm8, ymm12, ymm14, 136
vmovaps ymmword ptr [rsp+0x180], ymm8
vshufps ymm9, ymm12, ymm14, 221
vmovaps ymmword ptr [rsp+0x1A0], ymm9
vshufps ymm10, ymm13, ymm15, 136
vmovaps ymmword ptr [rsp+0x1C0], ymm10
vshufps ymm11, ymm13, ymm15, 221
vmovaps ymmword ptr [rsp+0x1E0], ymm11
vpbroadcastd ymm15, dword ptr [rsp+0x200]
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r12+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r13+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r14+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
prefetcht0 [r15+rdx+0x80]
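/* Seven fully unrolled BLAKE3 rounds follow. Each G mix adds two message
   rows and rotates by 16, 12, 8 and 7: the 16- and 8-bit rotates are byte
   shuffles (vpshufb with ROT16/ROT8), the others are shift-or pairs.
   ymm8 doubles as scratch, so state row v[8] is kept in [rsp+0x200]
   between steps. */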
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm0, ymmword ptr [rsp+0x220]
vpxor ymm13, ymm1, ymmword ptr [rsp+0x240]
vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip]
vpxor ymm15, ymm3, ymm15
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip]
vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip]
vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip]
vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip]
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0]
vpaddd ymm1, ymm1, ymmword ptr [rsp]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0]
vpaddd ymm0, ymm0, ymm4
vpaddd ymm1, ymm1, ymm5
vpaddd ymm2, ymm2, ymm6
vpaddd ymm3, ymm3, ymm7
vpxor ymm12, ymm12, ymm0
vpxor ymm13, ymm13, ymm1
vpxor ymm14, ymm14, ymm2
vpxor ymm15, ymm15, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpshufb ymm15, ymm15, ymm8
vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm13
vpaddd ymm10, ymm10, ymm14
vpaddd ymm11, ymm11, ymm15
vpxor ymm4, ymm4, ymm8
vpxor ymm5, ymm5, ymm9
vpxor ymm6, ymm6, ymm10
vpxor ymm7, ymm7, ymm11
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT16+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
vmovdqa ymmword ptr [rsp+0x200], ymm8
vpsrld ymm8, ymm5, 12
vpslld ymm5, ymm5, 20
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 12
vpslld ymm6, ymm6, 20
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 12
vpslld ymm7, ymm7, 20
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 12
vpslld ymm4, ymm4, 20
vpor ymm4, ymm4, ymm8
vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140]
vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180]
vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80]
vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0]
vpaddd ymm0, ymm0, ymm5
vpaddd ymm1, ymm1, ymm6
vpaddd ymm2, ymm2, ymm7
vpaddd ymm3, ymm3, ymm4
vpxor ymm15, ymm15, ymm0
vpxor ymm12, ymm12, ymm1
vpxor ymm13, ymm13, ymm2
vpxor ymm14, ymm14, ymm3
vbroadcasti128 ymm8, xmmword ptr [ROT8+rip]
vpshufb ymm15, ymm15, ymm8
vpshufb ymm12, ymm12, ymm8
vpshufb ymm13, ymm13, ymm8
vpshufb ymm14, ymm14, ymm8
vpaddd ymm10, ymm10, ymm15
vpaddd ymm11, ymm11, ymm12
vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200]
vpaddd ymm9, ymm9, ymm14
vpxor ymm5, ymm5, ymm10
vpxor ymm6, ymm6, ymm11
vpxor ymm7, ymm7, ymm8
vpxor ymm4, ymm4, ymm9
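/* End of the seven rounds: form the output chaining values as the xor of
   the two state halves, cv[i] = v[i] ^ v[i+8]. */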
vpxor ymm0, ymm0, ymm8
vpxor ymm1, ymm1, ymm9
vpxor ymm2, ymm2, ymm10
vpxor ymm3, ymm3, ymm11
vpsrld ymm8, ymm5, 7
vpslld ymm5, ymm5, 25
vpor ymm5, ymm5, ymm8
vpsrld ymm8, ymm6, 7
vpslld ymm6, ymm6, 25
vpor ymm6, ymm6, ymm8
vpsrld ymm8, ymm7, 7
vpslld ymm7, ymm7, 25
vpor ymm7, ymm7, ymm8
vpsrld ymm8, ymm4, 7
vpslld ymm4, ymm4, 25
vpor ymm4, ymm4, ymm8
vpxor ymm4, ymm4, ymm12
vpxor ymm5, ymm5, ymm13
vpxor ymm6, ymm6, ymm14
vpxor ymm7, ymm7, ymm15
movzx eax, byte ptr [rbp+0x78]
jne 9b
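/* All full blocks consumed: transpose the eight column-major chaining
   values back to row-major order and store 8 x 32 bytes of output at [rbx]. */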
mov rbx, qword ptr [rbp+0x90]
vunpcklps ymm8, ymm0, ymm1
vunpcklps ymm9, ymm2, ymm3
vunpckhps ymm10, ymm0, ymm1
vunpcklps ymm11, ymm4, ymm5
vunpcklps ymm0, ymm6, ymm7
vshufps ymm12, ymm8, ymm9, 78
vblendps ymm1, ymm8, ymm12, 0xCC
vshufps ymm8, ymm11, ymm0, 78
vunpckhps ymm13, ymm2, ymm3
vblendps ymm2, ymm11, ymm8, 0xCC
vblendps ymm3, ymm12, ymm9, 0xCC
vperm2f128 ymm12, ymm1, ymm2, 0x20
vmovups ymmword ptr [rbx], ymm12
vunpckhps ymm14, ymm4, ymm5
vblendps ymm4, ymm8, ymm0, 0xCC
vunpckhps ymm15, ymm6, ymm7
vperm2f128 ymm7, ymm3, ymm4, 0x20
vmovups ymmword ptr [rbx+0x20], ymm7
vshufps ymm5, ymm10, ymm13, 78
vblendps ymm6, ymm5, ymm13, 0xCC
vshufps ymm13, ymm14, ymm15, 78
vblendps ymm10, ymm10, ymm5, 0xCC
vblendps ymm14, ymm14, ymm13, 0xCC
vperm2f128 ymm8, ymm10, ymm14, 0x20
vmovups ymmword ptr [rbx+0x40], ymm8
vblendps ymm15, ymm13, ymm15, 0xCC
vperm2f128 ymm13, ymm6, ymm15, 0x20
vmovups ymmword ptr [rbx+0x60], ymm13
vperm2f128 ymm9, ymm1, ymm2, 0x31
vperm2f128 ymm11, ymm3, ymm4, 0x31
vmovups ymmword ptr [rbx+0x80], ymm9
vperm2f128 ymm14, ymm10, ymm14, 0x31
vperm2f128 ymm15, ymm6, ymm15, 0x31
vmovups ymmword ptr [rbx+0xA0], ymm11
vmovups ymmword ptr [rbx+0xC0], ymm14
vmovups ymmword ptr [rbx+0xE0], ymm15
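/* Advance to the next batch of eight inputs: bump the low counter words by
   the saved masked increment (ADD1) and propagate carries with the same
   flipped-sign-bit compare as in the prologue. */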
vmovdqa ymm0, ymmword ptr [rsp+0x2A0]
vpaddd ymm1, ymm0, ymmword ptr [rsp+0x220]
vmovdqa ymmword ptr [rsp+0x220], ymm1
vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip]
vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
vpcmpgtd ymm2, ymm0, ymm2
vmovdqa ymm0, ymmword ptr [rsp+0x240]
vpsubd ymm2, ymm0, ymm2
vmovdqa ymmword ptr [rsp+0x240], ymm2
add rdi, 64
add rbx, 256
mov qword ptr [rbp+0x90], rbx
sub rsi, 8
cmp rsi, 8
jnc 2b
test rsi, rsi
jnz 3f
4:
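/* Common epilogue: restore xmm6-xmm15 from the aligned spill area, then
   unwind the frame and pop the callee-saved GPRs in reverse order. */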
vzeroupper
vmovdqa xmm6, xmmword ptr [rsp+0x2D0]
vmovdqa xmm7, xmmword ptr [rsp+0x2E0]
vmovdqa xmm8, xmmword ptr [rsp+0x2F0]
vmovdqa xmm9, xmmword ptr [rsp+0x300]
vmovdqa xmm10, xmmword ptr [rsp+0x310]
vmovdqa xmm11, xmmword ptr [rsp+0x320]
vmovdqa xmm12, xmmword ptr [rsp+0x330]
vmovdqa xmm13, xmmword ptr [rsp+0x340]
vmovdqa xmm14, xmmword ptr [rsp+0x350]
vmovdqa xmm15, xmmword ptr [rsp+0x360]
mov rsp, rbp
pop rbp
pop rbx
pop rdi
pop rsi
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
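/* Tail: fewer than eight inputs remain. Process four at a time (two hashes
   per ymm state group, two groups in parallel, using the diagonalized form
   of G), then two, then one, reusing the same 7-round core with the message
   permutation done in registers. */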
mov rbx, qword ptr [rbp+0x90]
mov r15, qword ptr [rsp+0x2C0]
movzx r13d, byte ptr [rbp+0x78]
movzx r12d, byte ptr [rbp+0x88]
test rsi, 0x4
je 3f
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovdqa ymm8, ymm0
vmovdqa ymm9, ymm1
vbroadcasti128 ymm12, xmmword ptr [rsp+0x220]
vbroadcasti128 ymm13, xmmword ptr [rsp+0x240]
vpunpckldq ymm14, ymm12, ymm13
vpunpckhdq ymm15, ymm12, ymm13
vpermq ymm14, ymm14, 0x50
vpermq ymm15, ymm15, 0x50
vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
vpblendd ymm14, ymm14, ymm12, 0x44
vpblendd ymm15, ymm15, ymm12, 0x44
vmovdqa ymmword ptr [rsp], ymm14
vmovdqa ymmword ptr [rsp+0x20], ymm15
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vmovups ymm2, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm2, ymm3, 136
vshufps ymm5, ymm2, ymm3, 221
vmovups ymm2, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm3, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm2, ymm3, 136
vshufps ymm7, ymm2, ymm3, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
vmovups ymm10, ymmword ptr [r10+rdx-0x40]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x30]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01
vshufps ymm12, ymm10, ymm11, 136
vshufps ymm13, ymm10, ymm11, 221
vmovups ymm10, ymmword ptr [r10+rdx-0x20]
vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01
vmovups ymm11, ymmword ptr [r10+rdx-0x10]
vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01
vshufps ymm14, ymm10, ymm11, 136
vshufps ymm15, ymm10, ymm11, 221
vpshufd ymm14, ymm14, 0x93
vpshufd ymm15, ymm15, 0x93
vpbroadcastd ymm2, dword ptr [rsp+0x200]
vmovdqa ymm3, ymmword ptr [rsp]
vmovdqa ymm11, ymmword ptr [rsp+0x20]
vpblendd ymm3, ymm3, ymm2, 0x88
vpblendd ymm11, ymm11, ymm2, 0x88
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa ymm10, ymm2
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm8, ymm8, ymm12
vmovdqa ymmword ptr [rsp+0x40], ymm4
nop
vmovdqa ymmword ptr [rsp+0x60], ymm12
nop
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm5
vpaddd ymm8, ymm8, ymm13
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vmovdqa ymmword ptr [rsp+0x80], ymm5
vmovdqa ymmword ptr [rsp+0xA0], ymm13
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x93
vpshufd ymm8, ymm8, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x39
vpshufd ymm10, ymm10, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm8, ymm8, ymm14
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT16+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 12
vpslld ymm9, ymm9, 20
vpor ymm9, ymm9, ymm4
vpaddd ymm0, ymm0, ymm7
vpaddd ymm8, ymm8, ymm15
vpaddd ymm0, ymm0, ymm1
vpaddd ymm8, ymm8, ymm9
vpxor ymm3, ymm3, ymm0
vpxor ymm11, ymm11, ymm8
vbroadcasti128 ymm4, xmmword ptr [ROT8+rip]
vpshufb ymm3, ymm3, ymm4
vpshufb ymm11, ymm11, ymm4
vpaddd ymm2, ymm2, ymm3
vpaddd ymm10, ymm10, ymm11
vpxor ymm1, ymm1, ymm2
vpxor ymm9, ymm9, ymm10
vpsrld ymm4, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm4
vpsrld ymm4, ymm9, 7
vpslld ymm9, ymm9, 25
vpor ymm9, ymm9, ymm4
vpshufd ymm0, ymm0, 0x39
vpshufd ymm8, ymm8, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm11, ymm11, 0x4E
vpshufd ymm2, ymm2, 0x93
vpshufd ymm10, ymm10, 0x93
dec al
je 9f
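/* Not the last round: apply the BLAKE3 message-word permutation in
   registers before looping back. */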
vmovdqa ymm4, ymmword ptr [rsp+0x40]
vmovdqa ymm5, ymmword ptr [rsp+0x80]
vshufps ymm12, ymm4, ymm5, 214
vpshufd ymm13, ymm4, 0x0F
vpshufd ymm4, ymm12, 0x39
vshufps ymm12, ymm6, ymm7, 250
vpblendd ymm13, ymm13, ymm12, 0xAA
vpunpcklqdq ymm12, ymm7, ymm5
vpblendd ymm12, ymm12, ymm6, 0x88
vpshufd ymm12, ymm12, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymmword ptr [rsp+0x40], ymm13
vmovdqa ymmword ptr [rsp+0x80], ymm12
vmovdqa ymm12, ymmword ptr [rsp+0x60]
vmovdqa ymm13, ymmword ptr [rsp+0xA0]
vshufps ymm5, ymm12, ymm13, 214
vpshufd ymm6, ymm12, 0x0F
vpshufd ymm12, ymm5, 0x39
vshufps ymm5, ymm14, ymm15, 250
vpblendd ymm6, ymm6, ymm5, 0xAA
vpunpcklqdq ymm5, ymm15, ymm13
vpblendd ymm5, ymm5, ymm14, 0x88
vpshufd ymm5, ymm5, 0x78
vpunpckhdq ymm13, ymm13, ymm15
vpunpckldq ymm14, ymm14, ymm13
vpshufd ymm15, ymm14, 0x1E
vmovdqa ymm13, ymm6
vmovdqa ymm14, ymm5
vmovdqa ymm5, ymmword ptr [rsp+0x40]
vmovdqa ymm6, ymmword ptr [rsp+0x80]
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
vpxor ymm8, ymm8, ymm10
vpxor ymm9, ymm9, ymm11
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovdqu xmmword ptr [rbx+0x40], xmm8
vmovdqu xmmword ptr [rbx+0x50], xmm9
vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01
vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01
vmovaps xmm8, xmmword ptr [rsp+0x260]
vmovaps xmm0, xmmword ptr [rsp+0x220]
vmovaps xmm1, xmmword ptr [rsp+0x230]
vmovaps xmm2, xmmword ptr [rsp+0x240]
vmovaps xmm3, xmmword ptr [rsp+0x250]
vblendvps xmm0, xmm0, xmm1, xmm8
vblendvps xmm2, xmm2, xmm3, xmm8
vmovaps xmmword ptr [rsp+0x220], xmm0
vmovaps xmmword ptr [rsp+0x240], xmm2
add rbx, 128
add rdi, 32
sub rsi, 4
3:
test rsi, 0x2
je 3f
vbroadcasti128 ymm0, xmmword ptr [rcx]
vbroadcasti128 ymm1, xmmword ptr [rcx+0x10]
vmovd xmm13, dword ptr [rsp+0x220]
vpinsrd xmm13, xmm13, dword ptr [rsp+0x240], 1
vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovd xmm14, dword ptr [rsp+0x224]
vpinsrd xmm14, xmm14, dword ptr [rsp+0x244], 1
vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vinserti128 ymm13, ymm13, xmm14, 0x01
vbroadcasti128 ymm14, xmmword ptr [ROT16+rip]
vbroadcasti128 ymm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
mov dword ptr [rsp+0x200], eax
vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip]
vpbroadcastd ymm8, dword ptr [rsp+0x200]
vpblendd ymm3, ymm13, ymm8, 0x88
vmovups ymm8, ymmword ptr [r8+rdx-0x40]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x30]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01
vshufps ymm4, ymm8, ymm9, 136
vshufps ymm5, ymm8, ymm9, 221
vmovups ymm8, ymmword ptr [r8+rdx-0x20]
vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01
vmovups ymm9, ymmword ptr [r8+rdx-0x10]
vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01
vshufps ymm6, ymm8, ymm9, 136
vshufps ymm7, ymm8, ymm9, 221
vpshufd ymm6, ymm6, 0x93
vpshufd ymm7, ymm7, 0x93
mov al, 7
9:
vpaddd ymm0, ymm0, ymm4
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm5
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x93
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x39
vpaddd ymm0, ymm0, ymm6
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm14
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 12
vpslld ymm1, ymm1, 20
vpor ymm1, ymm1, ymm8
vpaddd ymm0, ymm0, ymm7
vpaddd ymm0, ymm0, ymm1
vpxor ymm3, ymm3, ymm0
vpshufb ymm3, ymm3, ymm15
vpaddd ymm2, ymm2, ymm3
vpxor ymm1, ymm1, ymm2
vpsrld ymm8, ymm1, 7
vpslld ymm1, ymm1, 25
vpor ymm1, ymm1, ymm8
vpshufd ymm0, ymm0, 0x39
vpshufd ymm3, ymm3, 0x4E
vpshufd ymm2, ymm2, 0x93
dec al
jz 9f
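/* BLAKE3 message permutation, applied in-register to ymm4-ymm7. */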
vshufps ymm8, ymm4, ymm5, 214
vpshufd ymm9, ymm4, 0x0F
vpshufd ymm4, ymm8, 0x39
vshufps ymm8, ymm6, ymm7, 250
vpblendd ymm9, ymm9, ymm8, 0xAA
vpunpcklqdq ymm8, ymm7, ymm5
vpblendd ymm8, ymm8, ymm6, 0x88
vpshufd ymm8, ymm8, 0x78
vpunpckhdq ymm5, ymm5, ymm7
vpunpckldq ymm6, ymm6, ymm5
vpshufd ymm7, ymm6, 0x1E
vmovdqa ymm5, ymm9
vmovdqa ymm6, ymm8
jmp 9b
9:
vpxor ymm0, ymm0, ymm2
vpxor ymm1, ymm1, ymm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01
vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01
vmovaps ymm8, ymmword ptr [rsp+0x260]
vmovaps ymm0, ymmword ptr [rsp+0x220]
vmovups ymm1, ymmword ptr [rsp+0x228]
vmovaps ymm2, ymmword ptr [rsp+0x240]
vmovups ymm3, ymmword ptr [rsp+0x248]
vblendvps ymm0, ymm0, ymm1, ymm8
vblendvps ymm2, ymm2, ymm3, ymm8
vmovaps ymmword ptr [rsp+0x220], ymm0
vmovaps ymmword ptr [rsp+0x240], ymm2
add rbx, 64
add rdi, 16
sub rsi, 2
3:
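/* One input remains: a single 128-bit (xmm) compression. */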
test rsi, 0x1
je 4b
vmovdqu xmm0, xmmword ptr [rcx]
vmovdqu xmm1, xmmword ptr [rcx+0x10]
vmovd xmm3, dword ptr [rsp+0x220]
vpinsrd xmm3, xmm3, dword ptr [rsp+0x240], 1
vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
vmovdqa xmm14, xmmword ptr [ROT16+rip]
vmovdqa xmm15, xmmword ptr [ROT8+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
.p2align 5
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip]
vmovdqa xmm3, xmm13
vpinsrd xmm3, xmm3, eax, 3
vmovups xmm8, xmmword ptr [r8+rdx-0x40]
vmovups xmm9, xmmword ptr [r8+rdx-0x30]
vshufps xmm4, xmm8, xmm9, 136
vshufps xmm5, xmm8, xmm9, 221
vmovups xmm8, xmmword ptr [r8+rdx-0x20]
vmovups xmm9, xmmword ptr [r8+rdx-0x10]
vshufps xmm6, xmm8, xmm9, 136
vshufps xmm7, xmm8, xmm9, 221
vpshufd xmm6, xmm6, 0x93
vpshufd xmm7, xmm7, 0x93
mov al, 7
9:
vpaddd xmm0, xmm0, xmm4
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm5
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x93
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x39
vpaddd xmm0, xmm0, xmm6
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm14
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 12
vpslld xmm1, xmm1, 20
vpor xmm1, xmm1, xmm8
vpaddd xmm0, xmm0, xmm7
vpaddd xmm0, xmm0, xmm1
vpxor xmm3, xmm3, xmm0
vpshufb xmm3, xmm3, xmm15
vpaddd xmm2, xmm2, xmm3
vpxor xmm1, xmm1, xmm2
vpsrld xmm8, xmm1, 7
vpslld xmm1, xmm1, 25
vpor xmm1, xmm1, xmm8
vpshufd xmm0, xmm0, 0x39
vpshufd xmm3, xmm3, 0x4E
vpshufd xmm2, xmm2, 0x93
dec al
jz 9f
vshufps xmm8, xmm4, xmm5, 214
vpshufd xmm9, xmm4, 0x0F
vpshufd xmm4, xmm8, 0x39
vshufps xmm8, xmm6, xmm7, 250
vpblendd xmm9, xmm9, xmm8, 0xAA
vpunpcklqdq xmm8, xmm7, xmm5
vpblendd xmm8, xmm8, xmm6, 0x88
vpshufd xmm8, xmm8, 0x78
vpunpckhdq xmm5, xmm5, xmm7
vpunpckldq xmm6, xmm6, xmm5
vpshufd xmm7, xmm6, 0x1E
vmovdqa xmm5, xmm9
vmovdqa xmm6, xmm8
jmp 9b
9:
vpxor xmm0, xmm0, xmm2
vpxor xmm1, xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
vmovdqu xmmword ptr [rbx], xmm0
vmovdqu xmmword ptr [rbx+0x10], xmm1
jmp 4b
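/* Constants: ADD0/ADD1 are per-lane counter offsets and increments,
   ROT16/ROT8 are pshufb masks implementing 32-bit rotates, and
   CMP_MSB_MASK biases unsigned compares used for counter carries. */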
.section .rodata
.p2align 6
ADD0:
.long 0, 1, 2, 3, 4, 5, 6, 7
ADD1:
.long 8, 8, 8, 8, 8, 8, 8, 8
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
| aerisarn/mesa-uwp | 69,094 | src/util/blake3/blake3_sse2_x86-64_unix.S |
#include "mesa_blake3_visibility.h"
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
#endif
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
#if !defined(_CET_ENDBR)
#define _CET_ENDBR
#endif
.intel_syntax noprefix
HIDDEN blake3_hash_many_sse2
HIDDEN _blake3_hash_many_sse2
HIDDEN blake3_compress_in_place_sse2
HIDDEN _blake3_compress_in_place_sse2
HIDDEN blake3_compress_xof_sse2
HIDDEN _blake3_compress_xof_sse2
.global blake3_hash_many_sse2
.global _blake3_hash_many_sse2
.global blake3_compress_in_place_sse2
.global _blake3_compress_in_place_sse2
.global blake3_compress_xof_sse2
.global _blake3_compress_xof_sse2
#ifdef __APPLE__
.text
#else
.section .text
#endif
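/* blake3_hash_many_sse2(inputs, num_inputs, blocks, key, counter,
   increment_counter, flags, flags_start, flags_end, out): first six
   arguments in rdi, rsi, rdx, rcx, r8, r9d per the SysV ABI; the
   rest are read from the stack (rbp+0x38..rbp+0x50 after the
   prologue). */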
.p2align 6
_blake3_hash_many_sse2:
blake3_hash_many_sse2:
_CET_ENDBR
push r15
push r14
push r13
push r12
push rbx
push rbp
mov rbp, rsp
sub rsp, 360
and rsp, 0xFFFFFFFFFFFFFFC0
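/* Build per-lane counters: counter+{0,1,2,3} low words in
   [rsp+0x110], high words in [rsp+0x120]. Biasing both sides with
   0x80000000 turns signed pcmpgtd into an unsigned compare, which
   detects the carry out of the 32-bit add. */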
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x50]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x38]
movzx r12d, byte ptr [rbp+0x48]
cmp rsi, 4
jc 3f
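/* Main loop: hash four inputs in parallel. Each 64-byte block is
   loaded from all four streams and transposed 4x4, leaving the
   sixteen message words as column vectors at [rsp]..[rsp+0xF0]. */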
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
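/* Seven unrolled rounds, message schedule baked into the [rsp+...]
   operands. SSE2 has no pshufb, so rot16 is a pshuflw/pshufhw word
   swap and rot12/8/7 are shift pairs; xmm8 doubles as the rotate
   temporary, so its state column spills through [rsp+0x100]. */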
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
pshuflw xmm15, xmm15, 0xB1
pshufhw xmm15, xmm15, 0xB1
pshuflw xmm12, xmm12, 0xB1
pshufhw xmm12, xmm12, 0xB1
pshuflw xmm13, xmm13, 0xB1
pshufhw xmm13, xmm13, 0xB1
pshuflw xmm14, xmm14, 0xB1
pshufhw xmm14, xmm14, 0xB1
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmm15
psrld xmm15, 8
pslld xmm8, 24
pxor xmm15, xmm8
movdqa xmm8, xmm12
psrld xmm12, 8
pslld xmm8, 24
pxor xmm12, xmm8
movdqa xmm8, xmm13
psrld xmm13, 8
pslld xmm8, 24
pxor xmm13, xmm8
movdqa xmm8, xmm14
psrld xmm14, 8
pslld xmm8, 24
pxor xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
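/* Final feed-forward begins: rows 0-3 ^= rows 8-11 here; rows 4-7
   ^= rows 12-15 after their last rotate below. */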
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
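/* The jne below reuses the flags of cmp rdx, r15 from the top of the
   block loop: mov and the intervening SSE ops leave RFLAGS intact. */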
mov eax, r13d
jne 9b
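/* All blocks consumed: transpose the four states back to row order
   and store four 32-byte output chaining values. */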
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
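/* Advance the lane counters by the flag-masked increment saved at
   [rsp+0x150], carrying into the high words with the same biased
   pcmpgtd trick as in the setup. */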
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jnz 3f
4:
mov rsp, rbp
pop rbp
pop rbx
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
3:
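/* Fewer than four inputs left: a two-input path here, then a
   one-input path further down. */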
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
movd xmm13, dword ptr [rsp+0x124]
punpckldq xmm14, xmm13
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
shl rax, 0x20
or rax, 0x40
movq xmm3, rax
movdqa xmmword ptr [rsp+0x20], xmm3
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
punpcklqdq xmm3, xmmword ptr [rsp+0x20]
punpcklqdq xmm11, xmmword ptr [rsp+0x20]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
pshuflw xmm11, xmm11, 0xB1
pshufhw xmm11, xmm11, 0xB1
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movdqa xmm13, xmm3
psrld xmm3, 8
pslld xmm13, 24
pxor xmm3, xmm13
movdqa xmm13, xmm11
psrld xmm11, 8
pslld xmm13, 24
pxor xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
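/* Message permutation for both states. pblendw is SSE4.1, so the
   blends are emulated with pand/por against the PBLENDW_*_MASK
   constants. */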
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pand xmm13, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm12, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm13, xmm12
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
movdqa xmm13, xmm6
pand xmm12, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm13, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm12, xmm13
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pand xmm6, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm5, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm6, xmm5
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
movdqa xmmword ptr [rsp+0x30], xmm2
movdqa xmm2, xmm14
pand xmm5, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm2, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm5, xmm2
movdqa xmm2, xmmword ptr [rsp+0x30]
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
mov eax, dword ptr [rsp+0x130]
neg eax
mov r10d, dword ptr [rsp+0x110+8*rax]
mov r11d, dword ptr [rsp+0x120+8*rax]
mov dword ptr [rsp+0x110], r10d
mov dword ptr [rsp+0x120], r11d
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
movd xmm14, dword ptr [rsp+0x120]
punpckldq xmm13, xmm14
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x40]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl rax, 32
or rax, 64
movq xmm12, rax
movdqa xmm3, xmm13
punpcklqdq xmm3, xmm12
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
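/* blake3_compress_in_place_sse2(cv, block, block_len, counter, flags):
   rdi=cv, rsi=block, edx=block_len, rcx=counter, r8d=flags. The
   shl/add below packs block_len and flags into one qword so state
   row 3 can be built with two movq and a punpcklqdq. */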
.p2align 6
blake3_compress_in_place_sse2:
_blake3_compress_in_place_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
shl r8, 32
add rdx, r8
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rdi], xmm0
movups xmmword ptr [rdi+0x10], xmm1
ret
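/* blake3_compress_xof_sse2: same core as compress_in_place, but
   writes the full 64-byte extended output to r9, with rows 2-3
   additionally XORed against the input chaining value. */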
.p2align 6
blake3_compress_xof_sse2:
_blake3_compress_xof_sse2:
_CET_ENDBR
movups xmm0, xmmword ptr [rdi]
movups xmm1, xmmword ptr [rdi+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, r8b
movzx edx, dl
shl rax, 32
add rdx, rax
movq xmm3, rcx
movq xmm4, rdx
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rsi]
movups xmm5, xmmword ptr [rsi+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rsi+0x20]
movups xmm7, xmmword ptr [rsi+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshuflw xmm3, xmm3, 0xB1
pshufhw xmm3, xmm3, 0xB1
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
movdqa xmm14, xmm3
psrld xmm3, 8
pslld xmm14, 24
pxor xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pand xmm9, xmmword ptr [PBLENDW_0x33_MASK+rip]
pand xmm8, xmmword ptr [PBLENDW_0xCC_MASK+rip]
por xmm9, xmm8
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
movdqa xmm10, xmm6
pand xmm8, xmmword ptr [PBLENDW_0x3F_MASK+rip]
pand xmm10, xmmword ptr [PBLENDW_0xC0_MASK+rip]
por xmm8, xmm10
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rdi]
movdqu xmm5, xmmword ptr [rdi+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r9], xmm0
movups xmmword ptr [r9+0x10], xmm1
movups xmmword ptr [r9+0x20], xmm2
movups xmmword ptr [r9+0x30], xmm3
ret
#ifdef __APPLE__
.static_data
#else
.section .rodata
#endif
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
PBLENDW_0x33_MASK:
.long 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000
PBLENDW_0xCC_MASK:
.long 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF
PBLENDW_0x3F_MASK:
.long 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000
PBLENDW_0xC0_MASK:
.long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
| aenu1/aps3e | 44,971 | app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/chacha_asm.S |
/* chacha_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_X86_64_BUILD
#ifndef __APPLE__
.text
.globl chacha_encrypt_x64
.type chacha_encrypt_x64,@function
.align 16
chacha_encrypt_x64:
#else
.section __TEXT,__text
.globl _chacha_encrypt_x64
.p2align 4
_chacha_encrypt_x64:
#endif /* __APPLE__ */
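/* rdi = ChaCha state (16 words; the block counter is word 12, at
   offset 48), rsi = input, rdx = output, ecx = byte count. */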
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x40, %rsp
cmpl $0x40, %ecx
jl L_chacha_x64_small
L_chacha_x64_start:
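/* Full-block loop, pure GPRs: state words 0-7 live in
   eax/ebx/ecx/edx/r8d-r11d and words 12-15 in r12d-r15d; words 8-11
   rotate through esi/ebp and 8(%rsp)..20(%rsp). (%rsp) holds the
   double-round count (10). */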
subq $48, %rsp
movq %rdx, 24(%rsp)
movq %rsi, 32(%rsp)
movq %rcx, 40(%rsp)
movq 32(%rdi), %rax
movq 40(%rdi), %rbx
movq %rax, 8(%rsp)
movq %rbx, 16(%rsp)
movl (%rdi), %eax
movl 4(%rdi), %ebx
movl 8(%rdi), %ecx
movl 12(%rdi), %edx
movl 16(%rdi), %r8d
movl 20(%rdi), %r9d
movl 24(%rdi), %r10d
movl 28(%rdi), %r11d
movl 48(%rdi), %r12d
movl 52(%rdi), %r13d
movl 56(%rdi), %r14d
movl 60(%rdi), %r15d
movb $10, (%rsp)
movl 8(%rsp), %esi
movl 12(%rsp), %ebp
L_chacha_x64_block_crypt_start:
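/* One double round: four column quarter-rounds, then four diagonal
   ones, each the usual add/xor/rotate 16,12,8,7 sequence. */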
addl %r8d, %eax
addl %r9d, %ebx
xorl %eax, %r12d
xorl %ebx, %r13d
roll $16, %r12d
roll $16, %r13d
addl %r12d, %esi
addl %r13d, %ebp
xorl %esi, %r8d
xorl %ebp, %r9d
roll $12, %r8d
roll $12, %r9d
addl %r8d, %eax
addl %r9d, %ebx
xorl %eax, %r12d
xorl %ebx, %r13d
roll $8, %r12d
roll $8, %r13d
addl %r12d, %esi
addl %r13d, %ebp
xorl %esi, %r8d
xorl %ebp, %r9d
roll $7, %r8d
roll $7, %r9d
movl %esi, 8(%rsp)
movl %ebp, 12(%rsp)
movl 16(%rsp), %esi
movl 20(%rsp), %ebp
addl %r10d, %ecx
addl %r11d, %edx
xorl %ecx, %r14d
xorl %edx, %r15d
roll $16, %r14d
roll $16, %r15d
addl %r14d, %esi
addl %r15d, %ebp
xorl %esi, %r10d
xorl %ebp, %r11d
roll $12, %r10d
roll $12, %r11d
addl %r10d, %ecx
addl %r11d, %edx
xorl %ecx, %r14d
xorl %edx, %r15d
roll $8, %r14d
roll $8, %r15d
addl %r14d, %esi
addl %r15d, %ebp
xorl %esi, %r10d
xorl %ebp, %r11d
roll $7, %r10d
roll $7, %r11d
addl %r9d, %eax
addl %r10d, %ebx
xorl %eax, %r15d
xorl %ebx, %r12d
roll $16, %r15d
roll $16, %r12d
addl %r15d, %esi
addl %r12d, %ebp
xorl %esi, %r9d
xorl %ebp, %r10d
roll $12, %r9d
roll $12, %r10d
addl %r9d, %eax
addl %r10d, %ebx
xorl %eax, %r15d
xorl %ebx, %r12d
roll $8, %r15d
roll $8, %r12d
addl %r15d, %esi
addl %r12d, %ebp
xorl %esi, %r9d
xorl %ebp, %r10d
roll $7, %r9d
roll $7, %r10d
movl %esi, 16(%rsp)
movl %ebp, 20(%rsp)
movl 8(%rsp), %esi
movl 12(%rsp), %ebp
addl %r11d, %ecx
addl %r8d, %edx
xorl %ecx, %r13d
xorl %edx, %r14d
roll $16, %r13d
roll $16, %r14d
addl %r13d, %esi
addl %r14d, %ebp
xorl %esi, %r11d
xorl %ebp, %r8d
roll $12, %r11d
roll $12, %r8d
addl %r11d, %ecx
addl %r8d, %edx
xorl %ecx, %r13d
xorl %edx, %r14d
roll $8, %r13d
roll $8, %r14d
addl %r13d, %esi
addl %r14d, %ebp
xorl %esi, %r11d
xorl %ebp, %r8d
roll $7, %r11d
roll $7, %r8d
decb (%rsp)
jnz L_chacha_x64_block_crypt_start
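/* 20 rounds done: add the saved input state back, XOR with the
   message, and write one 64-byte ciphertext block. */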
movl %esi, 8(%rsp)
movl %ebp, 12(%rsp)
movq 32(%rsp), %rsi
movq 24(%rsp), %rbp
addl (%rdi), %eax
addl 4(%rdi), %ebx
addl 8(%rdi), %ecx
addl 12(%rdi), %edx
addl 16(%rdi), %r8d
addl 20(%rdi), %r9d
addl 24(%rdi), %r10d
addl 28(%rdi), %r11d
addl 48(%rdi), %r12d
addl 52(%rdi), %r13d
addl 56(%rdi), %r14d
addl 60(%rdi), %r15d
xorl (%rsi), %eax
xorl 4(%rsi), %ebx
xorl 8(%rsi), %ecx
xorl 12(%rsi), %edx
xorl 16(%rsi), %r8d
xorl 20(%rsi), %r9d
xorl 24(%rsi), %r10d
xorl 28(%rsi), %r11d
xorl 48(%rsi), %r12d
xorl 52(%rsi), %r13d
xorl 56(%rsi), %r14d
xorl 60(%rsi), %r15d
movl %eax, (%rbp)
movl %ebx, 4(%rbp)
movl %ecx, 8(%rbp)
movl %edx, 12(%rbp)
movl %r8d, 16(%rbp)
movl %r9d, 20(%rbp)
movl %r10d, 24(%rbp)
movl %r11d, 28(%rbp)
movl %r12d, 48(%rbp)
movl %r13d, 52(%rbp)
movl %r14d, 56(%rbp)
movl %r15d, 60(%rbp)
movl 8(%rsp), %eax
movl 12(%rsp), %ebx
movl 16(%rsp), %ecx
movl 20(%rsp), %edx
addl 32(%rdi), %eax
addl 36(%rdi), %ebx
addl 40(%rdi), %ecx
addl 44(%rdi), %edx
xorl 32(%rsi), %eax
xorl 36(%rsi), %ebx
xorl 40(%rsi), %ecx
xorl 44(%rsi), %edx
movl %eax, 32(%rbp)
movl %ebx, 36(%rbp)
movl %ecx, 40(%rbp)
movl %edx, 44(%rbp)
movq 24(%rsp), %rdx
movq 40(%rsp), %rcx
addl $0x01, 48(%rdi)
addq $48, %rsp
subl $0x40, %ecx
addq $0x40, %rsi
addq $0x40, %rdx
cmpl $0x40, %ecx
jge L_chacha_x64_start
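/* Fewer than 64 bytes remain: run one more block and XOR only the
 * bytes that are left. */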
L_chacha_x64_small:
cmpl $0x00, %ecx
je L_chacha_x64_done
subq $48, %rsp
movq %rdx, 24(%rsp)
movq %rsi, 32(%rsp)
movq %rcx, 40(%rsp)
movq 32(%rdi), %rax
movq 40(%rdi), %rbx
movq %rax, 8(%rsp)
movq %rbx, 16(%rsp)
movl (%rdi), %eax
movl 4(%rdi), %ebx
movl 8(%rdi), %ecx
movl 12(%rdi), %edx
movl 16(%rdi), %r8d
movl 20(%rdi), %r9d
movl 24(%rdi), %r10d
movl 28(%rdi), %r11d
movl 48(%rdi), %r12d
movl 52(%rdi), %r13d
movl 56(%rdi), %r14d
movl 60(%rdi), %r15d
movb $10, (%rsp)
movl 8(%rsp), %esi
movl 12(%rsp), %ebp
L_chacha_x64_partial_crypt_start:
addl %r8d, %eax
addl %r9d, %ebx
xorl %eax, %r12d
xorl %ebx, %r13d
roll $16, %r12d
roll $16, %r13d
addl %r12d, %esi
addl %r13d, %ebp
xorl %esi, %r8d
xorl %ebp, %r9d
roll $12, %r8d
roll $12, %r9d
addl %r8d, %eax
addl %r9d, %ebx
xorl %eax, %r12d
xorl %ebx, %r13d
roll $8, %r12d
roll $8, %r13d
addl %r12d, %esi
addl %r13d, %ebp
xorl %esi, %r8d
xorl %ebp, %r9d
roll $7, %r8d
roll $7, %r9d
movl %esi, 8(%rsp)
movl %ebp, 12(%rsp)
movl 16(%rsp), %esi
movl 20(%rsp), %ebp
addl %r10d, %ecx
addl %r11d, %edx
xorl %ecx, %r14d
xorl %edx, %r15d
roll $16, %r14d
roll $16, %r15d
addl %r14d, %esi
addl %r15d, %ebp
xorl %esi, %r10d
xorl %ebp, %r11d
roll $12, %r10d
roll $12, %r11d
addl %r10d, %ecx
addl %r11d, %edx
xorl %ecx, %r14d
xorl %edx, %r15d
roll $8, %r14d
roll $8, %r15d
addl %r14d, %esi
addl %r15d, %ebp
xorl %esi, %r10d
xorl %ebp, %r11d
roll $7, %r10d
roll $7, %r11d
addl %r9d, %eax
addl %r10d, %ebx
xorl %eax, %r15d
xorl %ebx, %r12d
roll $16, %r15d
roll $16, %r12d
addl %r15d, %esi
addl %r12d, %ebp
xorl %esi, %r9d
xorl %ebp, %r10d
roll $12, %r9d
roll $12, %r10d
addl %r9d, %eax
addl %r10d, %ebx
xorl %eax, %r15d
xorl %ebx, %r12d
roll $8, %r15d
roll $8, %r12d
addl %r15d, %esi
addl %r12d, %ebp
xorl %esi, %r9d
xorl %ebp, %r10d
roll $7, %r9d
roll $7, %r10d
movl %esi, 16(%rsp)
movl %ebp, 20(%rsp)
movl 8(%rsp), %esi
movl 12(%rsp), %ebp
addl %r11d, %ecx
addl %r8d, %edx
xorl %ecx, %r13d
xorl %edx, %r14d
roll $16, %r13d
roll $16, %r14d
addl %r13d, %esi
addl %r14d, %ebp
xorl %esi, %r11d
xorl %ebp, %r8d
roll $12, %r11d
roll $12, %r8d
addl %r11d, %ecx
addl %r8d, %edx
xorl %ecx, %r13d
xorl %edx, %r14d
roll $8, %r13d
roll $8, %r14d
addl %r13d, %esi
addl %r14d, %ebp
xorl %esi, %r11d
xorl %ebp, %r8d
roll $7, %r11d
roll $7, %r8d
decb (%rsp)
jnz L_chacha_x64_partial_crypt_start
movl %esi, 8(%rsp)
movl %ebp, 12(%rsp)
movq 32(%rsp), %rsi
addl (%rdi), %eax
addl 4(%rdi), %ebx
addl 8(%rdi), %ecx
addl 12(%rdi), %edx
addl 16(%rdi), %r8d
addl 20(%rdi), %r9d
addl 24(%rdi), %r10d
addl 28(%rdi), %r11d
addl 48(%rdi), %r12d
addl 52(%rdi), %r13d
addl 56(%rdi), %r14d
addl 60(%rdi), %r15d
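/* The full 64-byte keystream block for this final partial block is
 * written into a buffer inside the context (offset 80, inferred from
 * the code) so the unused tail survives the partial XOR below. */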
leaq 80(%rdi), %rbp
movl %eax, (%rbp)
movl %ebx, 4(%rbp)
movl %ecx, 8(%rbp)
movl %edx, 12(%rbp)
movl %r8d, 16(%rbp)
movl %r9d, 20(%rbp)
movl %r10d, 24(%rbp)
movl %r11d, 28(%rbp)
movl %r12d, 48(%rbp)
movl %r13d, 52(%rbp)
movl %r14d, 56(%rbp)
movl %r15d, 60(%rbp)
movl 8(%rsp), %eax
movl 12(%rsp), %ebx
movl 16(%rsp), %ecx
movl 20(%rsp), %edx
addl 32(%rdi), %eax
addl 36(%rdi), %ebx
addl 40(%rdi), %ecx
addl 44(%rdi), %edx
movl %eax, 32(%rbp)
movl %ebx, 36(%rbp)
movl %ecx, 40(%rbp)
movl %edx, 44(%rbp)
movq 24(%rsp), %rdx
movq 40(%rsp), %rcx
addl $0x01, 48(%rdi)
addq $48, %rsp
movl %ecx, %r8d
xorq %rbx, %rbx
andl $7, %r8d
jz L_chacha_x64_partial_start64
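/* Partial-block XOR: handle len%8 bytes one at a time, then the rest
 * in 8-byte chunks; 76(%rdi) records how many keystream bytes were
 * left unused. */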
L_chacha_x64_partial_start8:
movzbl (%rbp,%rbx,1), %eax
xorb (%rsi,%rbx,1), %al
movb %al, (%rdx,%rbx,1)
incl %ebx
cmpl %r8d, %ebx
jne L_chacha_x64_partial_start8
je L_chacha_x64_partial_end64
L_chacha_x64_partial_start64:
movq (%rbp,%rbx,1), %rax
xorq (%rsi,%rbx,1), %rax
movq %rax, (%rdx,%rbx,1)
addl $8, %ebx
L_chacha_x64_partial_end64:
cmpl %ecx, %ebx
jne L_chacha_x64_partial_start64
movl $0x40, %ecx
subl %ebx, %ecx
movl %ecx, 76(%rdi)
L_chacha_x64_done:
addq $0x40, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
repz retq
#ifndef __APPLE__
.size chacha_encrypt_x64,.-chacha_encrypt_x64
#endif /* __APPLE__ */
#ifdef HAVE_INTEL_AVX1
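/*
 * AVX1 constants: rotl8/rotl16 are pshufb masks that rotate each
 * 32-bit lane left by 8 and 16 bits; "add" supplies block-counter
 * offsets +0..+3 for the four parallel blocks, and "four" steps the
 * counters by 4 per 256-byte iteration.
 */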
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_chacha20_avx1_rotl8:
.quad 0x605040702010003, 0xe0d0c0f0a09080b
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_chacha20_avx1_rotl16:
.quad 0x504070601000302, 0xd0c0f0e09080b0a
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_chacha20_avx1_add:
.quad 0x100000000, 0x300000002
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_chacha20_avx1_four:
.quad 0x400000004, 0x400000004
#ifndef __APPLE__
.text
.globl chacha_encrypt_avx1
.type chacha_encrypt_avx1,@function
.align 16
chacha_encrypt_avx1:
#else
.section __TEXT,__text
.globl _chacha_encrypt_avx1
.p2align 4
_chacha_encrypt_avx1:
#endif /* __APPLE__ */
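/*
 * chacha_encrypt_avx1 -- four ChaCha20 blocks (256 bytes) per
 * iteration, state held column-sliced across xmm0-xmm15. The 0x190
 * byte frame provides r9 (16-byte-aligned copy of the replicated
 * input state, 256 bytes) and r10 (aligned scratch, also used to
 * spill xmm11 during the rounds). eax = number of 256-byte chunks.
 */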
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $0x190, %rsp
movq %rsp, %r9
leaq 256(%rsp), %r10
leaq L_chacha20_avx1_rotl8(%rip), %r12
leaq L_chacha20_avx1_rotl16(%rip), %r13
leaq L_chacha20_avx1_add(%rip), %r14
leaq L_chacha20_avx1_four(%rip), %r15
addq $15, %r9
addq $15, %r10
andq $-16, %r9
andq $-16, %r10
movl %ecx, %eax
shrl $8, %eax
jz L_chacha20_avx1_end128
vpshufd $0x00, (%rdi), %xmm0
vpshufd $0x00, 4(%rdi), %xmm1
vpshufd $0x00, 8(%rdi), %xmm2
vpshufd $0x00, 12(%rdi), %xmm3
vpshufd $0x00, 16(%rdi), %xmm4
vpshufd $0x00, 20(%rdi), %xmm5
vpshufd $0x00, 24(%rdi), %xmm6
vpshufd $0x00, 28(%rdi), %xmm7
vpshufd $0x00, 32(%rdi), %xmm8
vpshufd $0x00, 36(%rdi), %xmm9
vpshufd $0x00, 40(%rdi), %xmm10
vpshufd $0x00, 44(%rdi), %xmm11
vpshufd $0x00, 48(%rdi), %xmm12
vpshufd $0x00, 52(%rdi), %xmm13
vpshufd $0x00, 56(%rdi), %xmm14
vpshufd $0x00, 60(%rdi), %xmm15
vpaddd (%r14), %xmm12, %xmm12
vmovdqa %xmm0, (%r9)
vmovdqa %xmm1, 16(%r9)
vmovdqa %xmm2, 32(%r9)
vmovdqa %xmm3, 48(%r9)
vmovdqa %xmm4, 64(%r9)
vmovdqa %xmm5, 80(%r9)
vmovdqa %xmm6, 96(%r9)
vmovdqa %xmm7, 112(%r9)
vmovdqa %xmm8, 128(%r9)
vmovdqa %xmm9, 144(%r9)
vmovdqa %xmm10, 160(%r9)
vmovdqa %xmm11, 176(%r9)
vmovdqa %xmm12, 192(%r9)
vmovdqa %xmm13, 208(%r9)
vmovdqa %xmm14, 224(%r9)
vmovdqa %xmm15, 240(%r9)
L_chacha20_avx1_start128:
vmovdqa %xmm11, 48(%r10)
movb $10, %r8b
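/* All 16 xmm registers hold live state, so xmm11 is parked at
 * 48(%r10) whenever a scratch register is needed for the shift/or
 * rotates. */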
L_chacha20_avx1_loop128:
vpaddd %xmm4, %xmm0, %xmm0
vpxor %xmm0, %xmm12, %xmm12
vmovdqa 48(%r10), %xmm11
vpshufb (%r13), %xmm12, %xmm12
vpaddd %xmm12, %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
vpaddd %xmm5, %xmm1, %xmm1
vpxor %xmm1, %xmm13, %xmm13
vpshufb (%r13), %xmm13, %xmm13
vpaddd %xmm13, %xmm9, %xmm9
vpxor %xmm9, %xmm5, %xmm5
vpaddd %xmm6, %xmm2, %xmm2
vpxor %xmm2, %xmm14, %xmm14
vpshufb (%r13), %xmm14, %xmm14
vpaddd %xmm14, %xmm10, %xmm10
vpxor %xmm10, %xmm6, %xmm6
vpaddd %xmm7, %xmm3, %xmm3
vpxor %xmm3, %xmm15, %xmm15
vpshufb (%r13), %xmm15, %xmm15
vpaddd %xmm15, %xmm11, %xmm11
vpxor %xmm11, %xmm7, %xmm7
vmovdqa %xmm11, 48(%r10)
vpsrld $20, %xmm4, %xmm11
vpslld $12, %xmm4, %xmm4
vpxor %xmm11, %xmm4, %xmm4
vpsrld $20, %xmm5, %xmm11
vpslld $12, %xmm5, %xmm5
vpxor %xmm11, %xmm5, %xmm5
vpsrld $20, %xmm6, %xmm11
vpslld $12, %xmm6, %xmm6
vpxor %xmm11, %xmm6, %xmm6
vpsrld $20, %xmm7, %xmm11
vpslld $12, %xmm7, %xmm7
vpxor %xmm11, %xmm7, %xmm7
vpaddd %xmm4, %xmm0, %xmm0
vpxor %xmm0, %xmm12, %xmm12
vmovdqa 48(%r10), %xmm11
vpshufb (%r12), %xmm12, %xmm12
vpaddd %xmm12, %xmm8, %xmm8
vpxor %xmm8, %xmm4, %xmm4
vpaddd %xmm5, %xmm1, %xmm1
vpxor %xmm1, %xmm13, %xmm13
vpshufb (%r12), %xmm13, %xmm13
vpaddd %xmm13, %xmm9, %xmm9
vpxor %xmm9, %xmm5, %xmm5
vpaddd %xmm6, %xmm2, %xmm2
vpxor %xmm2, %xmm14, %xmm14
vpshufb (%r12), %xmm14, %xmm14
vpaddd %xmm14, %xmm10, %xmm10
vpxor %xmm10, %xmm6, %xmm6
vpaddd %xmm7, %xmm3, %xmm3
vpxor %xmm3, %xmm15, %xmm15
vpshufb (%r12), %xmm15, %xmm15
vpaddd %xmm15, %xmm11, %xmm11
vpxor %xmm11, %xmm7, %xmm7
vmovdqa %xmm11, 48(%r10)
vpsrld $25, %xmm4, %xmm11
vpslld $7, %xmm4, %xmm4
vpxor %xmm11, %xmm4, %xmm4
vpsrld $25, %xmm5, %xmm11
vpslld $7, %xmm5, %xmm5
vpxor %xmm11, %xmm5, %xmm5
vpsrld $25, %xmm6, %xmm11
vpslld $7, %xmm6, %xmm6
vpxor %xmm11, %xmm6, %xmm6
vpsrld $25, %xmm7, %xmm11
vpslld $7, %xmm7, %xmm7
vpxor %xmm11, %xmm7, %xmm7
vpaddd %xmm5, %xmm0, %xmm0
vpxor %xmm0, %xmm15, %xmm15
vmovdqa 48(%r10), %xmm11
vpshufb (%r13), %xmm15, %xmm15
vpaddd %xmm15, %xmm10, %xmm10
vpxor %xmm10, %xmm5, %xmm5
vpaddd %xmm6, %xmm1, %xmm1
vpxor %xmm1, %xmm12, %xmm12
vpshufb (%r13), %xmm12, %xmm12
vpaddd %xmm12, %xmm11, %xmm11
vpxor %xmm11, %xmm6, %xmm6
vpaddd %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm13, %xmm13
vpshufb (%r13), %xmm13, %xmm13
vpaddd %xmm13, %xmm8, %xmm8
vpxor %xmm8, %xmm7, %xmm7
vpaddd %xmm4, %xmm3, %xmm3
vpxor %xmm3, %xmm14, %xmm14
vpshufb (%r13), %xmm14, %xmm14
vpaddd %xmm14, %xmm9, %xmm9
vpxor %xmm9, %xmm4, %xmm4
vmovdqa %xmm11, 48(%r10)
vpsrld $20, %xmm5, %xmm11
vpslld $12, %xmm5, %xmm5
vpxor %xmm11, %xmm5, %xmm5
vpsrld $20, %xmm6, %xmm11
vpslld $12, %xmm6, %xmm6
vpxor %xmm11, %xmm6, %xmm6
vpsrld $20, %xmm7, %xmm11
vpslld $12, %xmm7, %xmm7
vpxor %xmm11, %xmm7, %xmm7
vpsrld $20, %xmm4, %xmm11
vpslld $12, %xmm4, %xmm4
vpxor %xmm11, %xmm4, %xmm4
vpaddd %xmm5, %xmm0, %xmm0
vpxor %xmm0, %xmm15, %xmm15
vmovdqa 48(%r10), %xmm11
vpshufb (%r12), %xmm15, %xmm15
vpaddd %xmm15, %xmm10, %xmm10
vpxor %xmm10, %xmm5, %xmm5
vpaddd %xmm6, %xmm1, %xmm1
vpxor %xmm1, %xmm12, %xmm12
vpshufb (%r12), %xmm12, %xmm12
vpaddd %xmm12, %xmm11, %xmm11
vpxor %xmm11, %xmm6, %xmm6
vpaddd %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm13, %xmm13
vpshufb (%r12), %xmm13, %xmm13
vpaddd %xmm13, %xmm8, %xmm8
vpxor %xmm8, %xmm7, %xmm7
vpaddd %xmm4, %xmm3, %xmm3
vpxor %xmm3, %xmm14, %xmm14
vpshufb (%r12), %xmm14, %xmm14
vpaddd %xmm14, %xmm9, %xmm9
vpxor %xmm9, %xmm4, %xmm4
vmovdqa %xmm11, 48(%r10)
vpsrld $25, %xmm5, %xmm11
vpslld $7, %xmm5, %xmm5
vpxor %xmm11, %xmm5, %xmm5
vpsrld $25, %xmm6, %xmm11
vpslld $7, %xmm6, %xmm6
vpxor %xmm11, %xmm6, %xmm6
vpsrld $25, %xmm7, %xmm11
vpslld $7, %xmm7, %xmm7
vpxor %xmm11, %xmm7, %xmm7
vpsrld $25, %xmm4, %xmm11
vpslld $7, %xmm4, %xmm4
vpxor %xmm11, %xmm4, %xmm4
decb %r8b
jnz L_chacha20_avx1_loop128
vmovdqa 48(%r10), %xmm11
vpaddd (%r9), %xmm0, %xmm0
vpaddd 16(%r9), %xmm1, %xmm1
vpaddd 32(%r9), %xmm2, %xmm2
vpaddd 48(%r9), %xmm3, %xmm3
vpaddd 64(%r9), %xmm4, %xmm4
vpaddd 80(%r9), %xmm5, %xmm5
vpaddd 96(%r9), %xmm6, %xmm6
vpaddd 112(%r9), %xmm7, %xmm7
vpaddd 128(%r9), %xmm8, %xmm8
vpaddd 144(%r9), %xmm9, %xmm9
vpaddd 160(%r9), %xmm10, %xmm10
vpaddd 176(%r9), %xmm11, %xmm11
vpaddd 192(%r9), %xmm12, %xmm12
vpaddd 208(%r9), %xmm13, %xmm13
vpaddd 224(%r9), %xmm14, %xmm14
vpaddd 240(%r9), %xmm15, %xmm15
vmovdqa %xmm8, (%r10)
vmovdqa %xmm9, 16(%r10)
vmovdqa %xmm10, 32(%r10)
vmovdqa %xmm11, 48(%r10)
vmovdqa %xmm12, 64(%r10)
vmovdqa %xmm13, 80(%r10)
vmovdqa %xmm14, 96(%r10)
vmovdqa %xmm15, 112(%r10)
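/* Transpose: rows 0-7 of the four blocks sit in column-sliced 4x4
 * dword matrices; the punpck{l,h}dq/qdq pairs below turn each xmm
 * into 16 contiguous keystream bytes, XORed at the matching strided
 * input offsets. Rows 8-15 (saved at r10) get the same treatment
 * afterwards. */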
vpunpckldq %xmm1, %xmm0, %xmm8
vpunpckldq %xmm3, %xmm2, %xmm9
vpunpckhdq %xmm1, %xmm0, %xmm12
vpunpckhdq %xmm3, %xmm2, %xmm13
vpunpckldq %xmm5, %xmm4, %xmm10
vpunpckldq %xmm7, %xmm6, %xmm11
vpunpckhdq %xmm5, %xmm4, %xmm14
vpunpckhdq %xmm7, %xmm6, %xmm15
vpunpcklqdq %xmm9, %xmm8, %xmm0
vpunpcklqdq %xmm11, %xmm10, %xmm1
vpunpckhqdq %xmm9, %xmm8, %xmm2
vpunpckhqdq %xmm11, %xmm10, %xmm3
vpunpcklqdq %xmm13, %xmm12, %xmm4
vpunpcklqdq %xmm15, %xmm14, %xmm5
vpunpckhqdq %xmm13, %xmm12, %xmm6
vpunpckhqdq %xmm15, %xmm14, %xmm7
vmovdqu (%rsi), %xmm8
vmovdqu 16(%rsi), %xmm9
vmovdqu 64(%rsi), %xmm10
vmovdqu 80(%rsi), %xmm11
vmovdqu 128(%rsi), %xmm12
vmovdqu 144(%rsi), %xmm13
vmovdqu 192(%rsi), %xmm14
vmovdqu 208(%rsi), %xmm15
vpxor %xmm8, %xmm0, %xmm0
vpxor %xmm9, %xmm1, %xmm1
vpxor %xmm10, %xmm2, %xmm2
vpxor %xmm11, %xmm3, %xmm3
vpxor %xmm12, %xmm4, %xmm4
vpxor %xmm13, %xmm5, %xmm5
vpxor %xmm14, %xmm6, %xmm6
vpxor %xmm15, %xmm7, %xmm7
vmovdqu %xmm0, (%rdx)
vmovdqu %xmm1, 16(%rdx)
vmovdqu %xmm2, 64(%rdx)
vmovdqu %xmm3, 80(%rdx)
vmovdqu %xmm4, 128(%rdx)
vmovdqu %xmm5, 144(%rdx)
vmovdqu %xmm6, 192(%rdx)
vmovdqu %xmm7, 208(%rdx)
vmovdqa (%r10), %xmm0
vmovdqa 16(%r10), %xmm1
vmovdqa 32(%r10), %xmm2
vmovdqa 48(%r10), %xmm3
vmovdqa 64(%r10), %xmm4
vmovdqa 80(%r10), %xmm5
vmovdqa 96(%r10), %xmm6
vmovdqa 112(%r10), %xmm7
vpunpckldq %xmm1, %xmm0, %xmm8
vpunpckldq %xmm3, %xmm2, %xmm9
vpunpckhdq %xmm1, %xmm0, %xmm12
vpunpckhdq %xmm3, %xmm2, %xmm13
vpunpckldq %xmm5, %xmm4, %xmm10
vpunpckldq %xmm7, %xmm6, %xmm11
vpunpckhdq %xmm5, %xmm4, %xmm14
vpunpckhdq %xmm7, %xmm6, %xmm15
vpunpcklqdq %xmm9, %xmm8, %xmm0
vpunpcklqdq %xmm11, %xmm10, %xmm1
vpunpckhqdq %xmm9, %xmm8, %xmm2
vpunpckhqdq %xmm11, %xmm10, %xmm3
vpunpcklqdq %xmm13, %xmm12, %xmm4
vpunpcklqdq %xmm15, %xmm14, %xmm5
vpunpckhqdq %xmm13, %xmm12, %xmm6
vpunpckhqdq %xmm15, %xmm14, %xmm7
vmovdqu 32(%rsi), %xmm8
vmovdqu 48(%rsi), %xmm9
vmovdqu 96(%rsi), %xmm10
vmovdqu 112(%rsi), %xmm11
vmovdqu 160(%rsi), %xmm12
vmovdqu 176(%rsi), %xmm13
vmovdqu 224(%rsi), %xmm14
vmovdqu 240(%rsi), %xmm15
vpxor %xmm8, %xmm0, %xmm0
vpxor %xmm9, %xmm1, %xmm1
vpxor %xmm10, %xmm2, %xmm2
vpxor %xmm11, %xmm3, %xmm3
vpxor %xmm12, %xmm4, %xmm4
vpxor %xmm13, %xmm5, %xmm5
vpxor %xmm14, %xmm6, %xmm6
vpxor %xmm15, %xmm7, %xmm7
vmovdqu %xmm0, 32(%rdx)
vmovdqu %xmm1, 48(%rdx)
vmovdqu %xmm2, 96(%rdx)
vmovdqu %xmm3, 112(%rdx)
vmovdqu %xmm4, 160(%rdx)
vmovdqu %xmm5, 176(%rdx)
vmovdqu %xmm6, 224(%rdx)
vmovdqu %xmm7, 240(%rdx)
vmovdqa 192(%r9), %xmm12
addq $0x100, %rsi
addq $0x100, %rdx
vpaddd (%r15), %xmm12, %xmm12
subl $0x100, %ecx
vmovdqa %xmm12, 192(%r9)
cmpl $0x100, %ecx
jl L_chacha20_avx1_done128
vmovdqa (%r9), %xmm0
vmovdqa 16(%r9), %xmm1
vmovdqa 32(%r9), %xmm2
vmovdqa 48(%r9), %xmm3
vmovdqa 64(%r9), %xmm4
vmovdqa 80(%r9), %xmm5
vmovdqa 96(%r9), %xmm6
vmovdqa 112(%r9), %xmm7
vmovdqa 128(%r9), %xmm8
vmovdqa 144(%r9), %xmm9
vmovdqa 160(%r9), %xmm10
vmovdqa 176(%r9), %xmm11
vmovdqa 192(%r9), %xmm12
vmovdqa 208(%r9), %xmm13
vmovdqa 224(%r9), %xmm14
vmovdqa 240(%r9), %xmm15
jmp L_chacha20_avx1_start128
L_chacha20_avx1_done128:
shll $2, %eax
addl %eax, 48(%rdi)
L_chacha20_avx1_end128:
cmpl $0x40, %ecx
jl L_chacha20_avx1_block_done
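/* Single 64-byte block path: state as four row vectors, with vpshufd
 * lane rotations ($57/$0x4e/$0x93) providing the diagonal rounds --
 * the classic 128-bit SSE/AVX ChaCha layout. */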
L_chacha20_avx1_block_start:
vmovdqu (%rdi), %xmm0
vmovdqu 16(%rdi), %xmm1
vmovdqu 32(%rdi), %xmm2
vmovdqu 48(%rdi), %xmm3
vmovdqa %xmm0, %xmm5
vmovdqa %xmm1, %xmm6
vmovdqa %xmm2, %xmm7
vmovdqa %xmm3, %xmm8
movb $10, %al
L_chacha20_avx1_block_crypt_start:
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r13), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $20, %xmm1, %xmm4
vpslld $12, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r12), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $25, %xmm1, %xmm4
vpslld $7, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpshufd $57, %xmm1, %xmm1
vpshufd $0x4e, %xmm2, %xmm2
vpshufd $0x93, %xmm3, %xmm3
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r13), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $20, %xmm1, %xmm4
vpslld $12, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r12), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $25, %xmm1, %xmm4
vpslld $7, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpshufd $0x93, %xmm1, %xmm1
vpshufd $0x4e, %xmm2, %xmm2
vpshufd $57, %xmm3, %xmm3
decb %al
jnz L_chacha20_avx1_block_crypt_start
vpaddd %xmm5, %xmm0, %xmm0
vpaddd %xmm6, %xmm1, %xmm1
vpaddd %xmm7, %xmm2, %xmm2
vpaddd %xmm8, %xmm3, %xmm3
vmovdqu (%rsi), %xmm5
vmovdqu 16(%rsi), %xmm6
vmovdqu 32(%rsi), %xmm7
vmovdqu 48(%rsi), %xmm8
vpxor %xmm5, %xmm0, %xmm0
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm8, %xmm3, %xmm3
vmovdqu %xmm0, (%rdx)
vmovdqu %xmm1, 16(%rdx)
vmovdqu %xmm2, 32(%rdx)
vmovdqu %xmm3, 48(%rdx)
addl $0x01, 48(%rdi)
subl $0x40, %ecx
addq $0x40, %rsi
addq $0x40, %rdx
cmpl $0x40, %ecx
jge L_chacha20_avx1_block_start
L_chacha20_avx1_block_done:
cmpl $0x00, %ecx
je L_chacha20_avx1_partial_done
leaq 80(%rdi), %r10
vmovdqu (%rdi), %xmm0
vmovdqu 16(%rdi), %xmm1
vmovdqu 32(%rdi), %xmm2
vmovdqu 48(%rdi), %xmm3
vmovdqa %xmm0, %xmm5
vmovdqa %xmm1, %xmm6
vmovdqa %xmm2, %xmm7
vmovdqa %xmm3, %xmm8
movb $10, %al
L_chacha20_avx1_partial_crypt_start:
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r13), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $20, %xmm1, %xmm4
vpslld $12, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r12), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $25, %xmm1, %xmm4
vpslld $7, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpshufd $57, %xmm1, %xmm1
vpshufd $0x4e, %xmm2, %xmm2
vpshufd $0x93, %xmm3, %xmm3
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r13), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $20, %xmm1, %xmm4
vpslld $12, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm3, %xmm3
vpshufb (%r12), %xmm3, %xmm3
vpaddd %xmm3, %xmm2, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpsrld $25, %xmm1, %xmm4
vpslld $7, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpshufd $0x93, %xmm1, %xmm1
vpshufd $0x4e, %xmm2, %xmm2
vpshufd $57, %xmm3, %xmm3
decb %al
jnz L_chacha20_avx1_partial_crypt_start
vpaddd %xmm5, %xmm0, %xmm0
vpaddd %xmm6, %xmm1, %xmm1
vpaddd %xmm7, %xmm2, %xmm2
vpaddd %xmm8, %xmm3, %xmm3
vmovdqu %xmm0, (%r10)
vmovdqu %xmm1, 16(%r10)
vmovdqu %xmm2, 32(%r10)
vmovdqu %xmm3, 48(%r10)
addl $0x01, 48(%rdi)
movl %ecx, %r8d
xorq %r11, %r11
andl $7, %r8d
jz L_chacha20_avx1_partial_start64
L_chacha20_avx1_partial_start8:
movzbl (%r10,%r11,1), %eax
xorb (%rsi,%r11,1), %al
movb %al, (%rdx,%r11,1)
incl %r11d
cmpl %r8d, %r11d
jne L_chacha20_avx1_partial_start8
je L_chacha20_avx1_partial_end64
L_chacha20_avx1_partial_start64:
movq (%r10,%r11,1), %rax
xorq (%rsi,%r11,1), %rax
movq %rax, (%rdx,%r11,1)
addl $8, %r11d
L_chacha20_avx1_partial_end64:
cmpl %ecx, %r11d
jne L_chacha20_avx1_partial_start64
movl $0x40, %r8d
subl %r11d, %r8d
movl %r8d, 76(%rdi)
L_chacha20_avx1_partial_done:
vzeroupper
addq $0x190, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size chacha_encrypt_avx1,.-chacha_encrypt_avx1
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
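/* AVX2 constants: the same rotate-by-8/16 pshufb masks widened to
 * 256 bits, counter offsets +0..+7 for eight parallel blocks, and an
 * "eight" vector to step the counters per 512-byte iteration. */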
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_chacha20_avx2_rotl8:
.quad 0x605040702010003, 0xe0d0c0f0a09080b
.quad 0x605040702010003, 0xe0d0c0f0a09080b
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_chacha20_avx2_rotl16:
.quad 0x504070601000302, 0xd0c0f0e09080b0a
.quad 0x504070601000302, 0xd0c0f0e09080b0a
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_chacha20_avx2_add:
.quad 0x100000000, 0x300000002
.quad 0x500000004, 0x700000006
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_chacha20_avx2_eight:
.quad 0x800000008, 0x800000008
.quad 0x800000008, 0x800000008
#ifndef __APPLE__
.text
.globl chacha_encrypt_avx2
.type chacha_encrypt_avx2,@function
.align 16
chacha_encrypt_avx2:
#else
.section __TEXT,__text
.globl _chacha_encrypt_avx2
.p2align 4
_chacha_encrypt_avx2:
#endif /* __APPLE__ */
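/*
 * chacha_encrypt_avx2 -- eight ChaCha20 blocks (512 bytes) per
 * iteration using ymm registers; eax = number of 512-byte chunks.
 * The frame mirrors the AVX1 version, doubled: r9 points at a
 * 32-byte-aligned copy of the replicated state, r10 at scratch.
 */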
pushq %r12
pushq %r13
pushq %r14
subq $0x320, %rsp
movq %rsp, %r9
leaq L_chacha20_avx2_rotl8(%rip), %r11
leaq L_chacha20_avx2_rotl16(%rip), %r12
leaq L_chacha20_avx2_add(%rip), %r13
leaq L_chacha20_avx2_eight(%rip), %r14
leaq 512(%rsp), %r10
addq $31, %r9
addq $31, %r10
andq $-32, %r9
andq $-32, %r10
movl %ecx, %eax
shrl $9, %eax
jz L_chacha20_avx2_end256
vpbroadcastd (%rdi), %ymm0
vpbroadcastd 4(%rdi), %ymm1
vpbroadcastd 8(%rdi), %ymm2
vpbroadcastd 12(%rdi), %ymm3
vpbroadcastd 16(%rdi), %ymm4
vpbroadcastd 20(%rdi), %ymm5
vpbroadcastd 24(%rdi), %ymm6
vpbroadcastd 28(%rdi), %ymm7
vpbroadcastd 32(%rdi), %ymm8
vpbroadcastd 36(%rdi), %ymm9
vpbroadcastd 40(%rdi), %ymm10
vpbroadcastd 44(%rdi), %ymm11
vpbroadcastd 48(%rdi), %ymm12
vpbroadcastd 52(%rdi), %ymm13
vpbroadcastd 56(%rdi), %ymm14
vpbroadcastd 60(%rdi), %ymm15
vpaddd (%r13), %ymm12, %ymm12
vmovdqa %ymm0, (%r9)
vmovdqa %ymm1, 32(%r9)
vmovdqa %ymm2, 64(%r9)
vmovdqa %ymm3, 96(%r9)
vmovdqa %ymm4, 128(%r9)
vmovdqa %ymm5, 160(%r9)
vmovdqa %ymm6, 192(%r9)
vmovdqa %ymm7, 224(%r9)
vmovdqa %ymm8, 256(%r9)
vmovdqa %ymm9, 288(%r9)
vmovdqa %ymm10, 320(%r9)
vmovdqa %ymm11, 352(%r9)
vmovdqa %ymm12, 384(%r9)
vmovdqa %ymm13, 416(%r9)
vmovdqa %ymm14, 448(%r9)
vmovdqa %ymm15, 480(%r9)
L_chacha20_avx2_start256:
movb $10, %r8b
vmovdqa %ymm11, 96(%r10)
L_chacha20_avx2_loop256:
vpaddd %ymm4, %ymm0, %ymm0
vpxor %ymm0, %ymm12, %ymm12
vmovdqa 96(%r10), %ymm11
vpshufb (%r12), %ymm12, %ymm12
vpaddd %ymm12, %ymm8, %ymm8
vpxor %ymm8, %ymm4, %ymm4
vpaddd %ymm5, %ymm1, %ymm1
vpxor %ymm1, %ymm13, %ymm13
vpshufb (%r12), %ymm13, %ymm13
vpaddd %ymm13, %ymm9, %ymm9
vpxor %ymm9, %ymm5, %ymm5
vpaddd %ymm6, %ymm2, %ymm2
vpxor %ymm2, %ymm14, %ymm14
vpshufb (%r12), %ymm14, %ymm14
vpaddd %ymm14, %ymm10, %ymm10
vpxor %ymm10, %ymm6, %ymm6
vpaddd %ymm7, %ymm3, %ymm3
vpxor %ymm3, %ymm15, %ymm15
vpshufb (%r12), %ymm15, %ymm15
vpaddd %ymm15, %ymm11, %ymm11
vpxor %ymm11, %ymm7, %ymm7
vmovdqa %ymm11, 96(%r10)
vpsrld $20, %ymm4, %ymm11
vpslld $12, %ymm4, %ymm4
vpxor %ymm11, %ymm4, %ymm4
vpsrld $20, %ymm5, %ymm11
vpslld $12, %ymm5, %ymm5
vpxor %ymm11, %ymm5, %ymm5
vpsrld $20, %ymm6, %ymm11
vpslld $12, %ymm6, %ymm6
vpxor %ymm11, %ymm6, %ymm6
vpsrld $20, %ymm7, %ymm11
vpslld $12, %ymm7, %ymm7
vpxor %ymm11, %ymm7, %ymm7
vpaddd %ymm4, %ymm0, %ymm0
vpxor %ymm0, %ymm12, %ymm12
vmovdqa 96(%r10), %ymm11
vpshufb (%r11), %ymm12, %ymm12
vpaddd %ymm12, %ymm8, %ymm8
vpxor %ymm8, %ymm4, %ymm4
vpaddd %ymm5, %ymm1, %ymm1
vpxor %ymm1, %ymm13, %ymm13
vpshufb (%r11), %ymm13, %ymm13
vpaddd %ymm13, %ymm9, %ymm9
vpxor %ymm9, %ymm5, %ymm5
vpaddd %ymm6, %ymm2, %ymm2
vpxor %ymm2, %ymm14, %ymm14
vpshufb (%r11), %ymm14, %ymm14
vpaddd %ymm14, %ymm10, %ymm10
vpxor %ymm10, %ymm6, %ymm6
vpaddd %ymm7, %ymm3, %ymm3
vpxor %ymm3, %ymm15, %ymm15
vpshufb (%r11), %ymm15, %ymm15
vpaddd %ymm15, %ymm11, %ymm11
vpxor %ymm11, %ymm7, %ymm7
vmovdqa %ymm11, 96(%r10)
vpsrld $25, %ymm4, %ymm11
vpslld $7, %ymm4, %ymm4
vpxor %ymm11, %ymm4, %ymm4
vpsrld $25, %ymm5, %ymm11
vpslld $7, %ymm5, %ymm5
vpxor %ymm11, %ymm5, %ymm5
vpsrld $25, %ymm6, %ymm11
vpslld $7, %ymm6, %ymm6
vpxor %ymm11, %ymm6, %ymm6
vpsrld $25, %ymm7, %ymm11
vpslld $7, %ymm7, %ymm7
vpxor %ymm11, %ymm7, %ymm7
vpaddd %ymm5, %ymm0, %ymm0
vpxor %ymm0, %ymm15, %ymm15
vmovdqa 96(%r10), %ymm11
vpshufb (%r12), %ymm15, %ymm15
vpaddd %ymm15, %ymm10, %ymm10
vpxor %ymm10, %ymm5, %ymm5
vpaddd %ymm6, %ymm1, %ymm1
vpxor %ymm1, %ymm12, %ymm12
vpshufb (%r12), %ymm12, %ymm12
vpaddd %ymm12, %ymm11, %ymm11
vpxor %ymm11, %ymm6, %ymm6
vpaddd %ymm7, %ymm2, %ymm2
vpxor %ymm2, %ymm13, %ymm13
vpshufb (%r12), %ymm13, %ymm13
vpaddd %ymm13, %ymm8, %ymm8
vpxor %ymm8, %ymm7, %ymm7
vpaddd %ymm4, %ymm3, %ymm3
vpxor %ymm3, %ymm14, %ymm14
vpshufb (%r12), %ymm14, %ymm14
vpaddd %ymm14, %ymm9, %ymm9
vpxor %ymm9, %ymm4, %ymm4
vmovdqa %ymm11, 96(%r10)
vpsrld $20, %ymm5, %ymm11
vpslld $12, %ymm5, %ymm5
vpxor %ymm11, %ymm5, %ymm5
vpsrld $20, %ymm6, %ymm11
vpslld $12, %ymm6, %ymm6
vpxor %ymm11, %ymm6, %ymm6
vpsrld $20, %ymm7, %ymm11
vpslld $12, %ymm7, %ymm7
vpxor %ymm11, %ymm7, %ymm7
vpsrld $20, %ymm4, %ymm11
vpslld $12, %ymm4, %ymm4
vpxor %ymm11, %ymm4, %ymm4
vpaddd %ymm5, %ymm0, %ymm0
vpxor %ymm0, %ymm15, %ymm15
vmovdqa 96(%r10), %ymm11
vpshufb (%r11), %ymm15, %ymm15
vpaddd %ymm15, %ymm10, %ymm10
vpxor %ymm10, %ymm5, %ymm5
vpaddd %ymm6, %ymm1, %ymm1
vpxor %ymm1, %ymm12, %ymm12
vpshufb (%r11), %ymm12, %ymm12
vpaddd %ymm12, %ymm11, %ymm11
vpxor %ymm11, %ymm6, %ymm6
vpaddd %ymm7, %ymm2, %ymm2
vpxor %ymm2, %ymm13, %ymm13
vpshufb (%r11), %ymm13, %ymm13
vpaddd %ymm13, %ymm8, %ymm8
vpxor %ymm8, %ymm7, %ymm7
vpaddd %ymm4, %ymm3, %ymm3
vpxor %ymm3, %ymm14, %ymm14
vpshufb (%r11), %ymm14, %ymm14
vpaddd %ymm14, %ymm9, %ymm9
vpxor %ymm9, %ymm4, %ymm4
vmovdqa %ymm11, 96(%r10)
vpsrld $25, %ymm5, %ymm11
vpslld $7, %ymm5, %ymm5
vpxor %ymm11, %ymm5, %ymm5
vpsrld $25, %ymm6, %ymm11
vpslld $7, %ymm6, %ymm6
vpxor %ymm11, %ymm6, %ymm6
vpsrld $25, %ymm7, %ymm11
vpslld $7, %ymm7, %ymm7
vpxor %ymm11, %ymm7, %ymm7
vpsrld $25, %ymm4, %ymm11
vpslld $7, %ymm4, %ymm4
vpxor %ymm11, %ymm4, %ymm4
decb %r8b
jnz L_chacha20_avx2_loop256
vmovdqa 96(%r10), %ymm11
vpaddd (%r9), %ymm0, %ymm0
vpaddd 32(%r9), %ymm1, %ymm1
vpaddd 64(%r9), %ymm2, %ymm2
vpaddd 96(%r9), %ymm3, %ymm3
vpaddd 128(%r9), %ymm4, %ymm4
vpaddd 160(%r9), %ymm5, %ymm5
vpaddd 192(%r9), %ymm6, %ymm6
vpaddd 224(%r9), %ymm7, %ymm7
vpaddd 256(%r9), %ymm8, %ymm8
vpaddd 288(%r9), %ymm9, %ymm9
vpaddd 320(%r9), %ymm10, %ymm10
vpaddd 352(%r9), %ymm11, %ymm11
vpaddd 384(%r9), %ymm12, %ymm12
vpaddd 416(%r9), %ymm13, %ymm13
vpaddd 448(%r9), %ymm14, %ymm14
vpaddd 480(%r9), %ymm15, %ymm15
vmovdqa %ymm8, (%r10)
vmovdqa %ymm9, 32(%r10)
vmovdqa %ymm10, 64(%r10)
vmovdqa %ymm11, 96(%r10)
vmovdqa %ymm12, 128(%r10)
vmovdqa %ymm13, 160(%r10)
vmovdqa %ymm14, 192(%r10)
vmovdqa %ymm15, 224(%r10)
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckldq %ymm3, %ymm2, %ymm9
vpunpckhdq %ymm1, %ymm0, %ymm12
vpunpckhdq %ymm3, %ymm2, %ymm13
vpunpckldq %ymm5, %ymm4, %ymm10
vpunpckldq %ymm7, %ymm6, %ymm11
vpunpckhdq %ymm5, %ymm4, %ymm14
vpunpckhdq %ymm7, %ymm6, %ymm15
vpunpcklqdq %ymm9, %ymm8, %ymm0
vpunpcklqdq %ymm11, %ymm10, %ymm1
vpunpckhqdq %ymm9, %ymm8, %ymm2
vpunpckhqdq %ymm11, %ymm10, %ymm3
vpunpcklqdq %ymm13, %ymm12, %ymm4
vpunpcklqdq %ymm15, %ymm14, %ymm5
vpunpckhqdq %ymm13, %ymm12, %ymm6
vpunpckhqdq %ymm15, %ymm14, %ymm7
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vperm2i128 $49, %ymm1, %ymm0, %ymm12
vperm2i128 $49, %ymm3, %ymm2, %ymm13
vperm2i128 $32, %ymm5, %ymm4, %ymm10
vperm2i128 $32, %ymm7, %ymm6, %ymm11
vperm2i128 $49, %ymm5, %ymm4, %ymm14
vperm2i128 $49, %ymm7, %ymm6, %ymm15
vmovdqu (%rsi), %ymm0
vmovdqu 64(%rsi), %ymm1
vmovdqu 128(%rsi), %ymm2
vmovdqu 192(%rsi), %ymm3
vmovdqu 256(%rsi), %ymm4
vmovdqu 320(%rsi), %ymm5
vmovdqu 384(%rsi), %ymm6
vmovdqu 448(%rsi), %ymm7
vpxor %ymm0, %ymm8, %ymm8
vpxor %ymm1, %ymm9, %ymm9
vpxor %ymm2, %ymm10, %ymm10
vpxor %ymm3, %ymm11, %ymm11
vpxor %ymm4, %ymm12, %ymm12
vpxor %ymm5, %ymm13, %ymm13
vpxor %ymm6, %ymm14, %ymm14
vpxor %ymm7, %ymm15, %ymm15
vmovdqu %ymm8, (%rdx)
vmovdqu %ymm9, 64(%rdx)
vmovdqu %ymm10, 128(%rdx)
vmovdqu %ymm11, 192(%rdx)
vmovdqu %ymm12, 256(%rdx)
vmovdqu %ymm13, 320(%rdx)
vmovdqu %ymm14, 384(%rdx)
vmovdqu %ymm15, 448(%rdx)
vmovdqa (%r10), %ymm0
vmovdqa 32(%r10), %ymm1
vmovdqa 64(%r10), %ymm2
vmovdqa 96(%r10), %ymm3
vmovdqa 128(%r10), %ymm4
vmovdqa 160(%r10), %ymm5
vmovdqa 192(%r10), %ymm6
vmovdqa 224(%r10), %ymm7
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckldq %ymm3, %ymm2, %ymm9
vpunpckhdq %ymm1, %ymm0, %ymm12
vpunpckhdq %ymm3, %ymm2, %ymm13
vpunpckldq %ymm5, %ymm4, %ymm10
vpunpckldq %ymm7, %ymm6, %ymm11
vpunpckhdq %ymm5, %ymm4, %ymm14
vpunpckhdq %ymm7, %ymm6, %ymm15
vpunpcklqdq %ymm9, %ymm8, %ymm0
vpunpcklqdq %ymm11, %ymm10, %ymm1
vpunpckhqdq %ymm9, %ymm8, %ymm2
vpunpckhqdq %ymm11, %ymm10, %ymm3
vpunpcklqdq %ymm13, %ymm12, %ymm4
vpunpcklqdq %ymm15, %ymm14, %ymm5
vpunpckhqdq %ymm13, %ymm12, %ymm6
vpunpckhqdq %ymm15, %ymm14, %ymm7
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vperm2i128 $49, %ymm1, %ymm0, %ymm12
vperm2i128 $49, %ymm3, %ymm2, %ymm13
vperm2i128 $32, %ymm5, %ymm4, %ymm10
vperm2i128 $32, %ymm7, %ymm6, %ymm11
vperm2i128 $49, %ymm5, %ymm4, %ymm14
vperm2i128 $49, %ymm7, %ymm6, %ymm15
vmovdqu 32(%rsi), %ymm0
vmovdqu 96(%rsi), %ymm1
vmovdqu 160(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vmovdqu 288(%rsi), %ymm4
vmovdqu 352(%rsi), %ymm5
vmovdqu 416(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpxor %ymm0, %ymm8, %ymm8
vpxor %ymm1, %ymm9, %ymm9
vpxor %ymm2, %ymm10, %ymm10
vpxor %ymm3, %ymm11, %ymm11
vpxor %ymm4, %ymm12, %ymm12
vpxor %ymm5, %ymm13, %ymm13
vpxor %ymm6, %ymm14, %ymm14
vpxor %ymm7, %ymm15, %ymm15
vmovdqu %ymm8, 32(%rdx)
vmovdqu %ymm9, 96(%rdx)
vmovdqu %ymm10, 160(%rdx)
vmovdqu %ymm11, 224(%rdx)
vmovdqu %ymm12, 288(%rdx)
vmovdqu %ymm13, 352(%rdx)
vmovdqu %ymm14, 416(%rdx)
vmovdqu %ymm15, 480(%rdx)
vmovdqa 384(%r9), %ymm12
addq $0x200, %rsi
addq $0x200, %rdx
vpaddd (%r14), %ymm12, %ymm12
subl $0x200, %ecx
vmovdqa %ymm12, 384(%r9)
cmpl $0x200, %ecx
jl L_chacha20_avx2_done256
vmovdqa (%r9), %ymm0
vmovdqa 32(%r9), %ymm1
vmovdqa 64(%r9), %ymm2
vmovdqa 96(%r9), %ymm3
vmovdqa 128(%r9), %ymm4
vmovdqa 160(%r9), %ymm5
vmovdqa 192(%r9), %ymm6
vmovdqa 224(%r9), %ymm7
vmovdqa 256(%r9), %ymm8
vmovdqa 288(%r9), %ymm9
vmovdqa 320(%r9), %ymm10
vmovdqa 352(%r9), %ymm11
vmovdqa 384(%r9), %ymm12
vmovdqa 416(%r9), %ymm13
vmovdqa 448(%r9), %ymm14
vmovdqa 480(%r9), %ymm15
jmp L_chacha20_avx2_start256
L_chacha20_avx2_done256:
shll $3, %eax
addl %eax, 48(%rdi)
L_chacha20_avx2_end256:
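/* Anything under 512 bytes is delegated to the AVX1 routine (rsi,
 * rdx and ecx were already advanced above); vzeroupper then avoids
 * AVX-to-SSE transition penalties after the call returns. */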
#ifndef __APPLE__
callq chacha_encrypt_avx1@plt
#else
callq _chacha_encrypt_avx1
#endif /* __APPLE__ */
vzeroupper
addq $0x320, %rsp
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size chacha_encrypt_avx2,.-chacha_encrypt_avx2
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* WOLFSSL_X86_64_BUILD */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
| aerisarn/mesa-uwp | 63,460 | src/util/blake3/blake3_sse41_x86-64_windows_gnu.S |
.intel_syntax noprefix
.global blake3_hash_many_sse41
.global _blake3_hash_many_sse41
.global blake3_compress_in_place_sse41
.global _blake3_compress_in_place_sse41
.global blake3_compress_xof_sse41
.global _blake3_compress_xof_sse41
.section .text
.p2align 6
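/*
 * blake3_hash_many_sse41, Windows x64 entry (per the upstream BLAKE3
 * sources: inputs, num_inputs, blocks, key, counter,
 * increment_counter, flags, flags_start, flags_end, out). Arguments
 * arrive in rcx/rdx/r8/r9 plus the stack; the prologue saves the
 * Windows-ABI nonvolatile registers, including xmm6-xmm15, before
 * remapping to the SysV-style registers the body expects.
 */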
_blake3_hash_many_sse41:
blake3_hash_many_sse41:
push r15
push r14
push r13
push r12
push rsi
push rdi
push rbx
push rbp
mov rbp, rsp
sub rsp, 528
and rsp, 0xFFFFFFFFFFFFFFC0
movdqa xmmword ptr [rsp+0x170], xmm6
movdqa xmmword ptr [rsp+0x180], xmm7
movdqa xmmword ptr [rsp+0x190], xmm8
movdqa xmmword ptr [rsp+0x1A0], xmm9
movdqa xmmword ptr [rsp+0x1B0], xmm10
movdqa xmmword ptr [rsp+0x1C0], xmm11
movdqa xmmword ptr [rsp+0x1D0], xmm12
movdqa xmmword ptr [rsp+0x1E0], xmm13
movdqa xmmword ptr [rsp+0x1F0], xmm14
movdqa xmmword ptr [rsp+0x200], xmm15
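/* Win64 -> SysV shuffle: rdi = inputs, rsi = num_inputs,
 * rdx = blocks, rcx = key; the remaining arguments (counter,
 * increment_counter, flags, flags_start, flags_end, out) are fetched
 * from the caller's stack via rbp. */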
mov rdi, rcx
mov rsi, rdx
mov rdx, r8
mov rcx, r9
mov r8, qword ptr [rbp+0x68]
movzx r9, byte ptr [rbp+0x70]
neg r9d
movd xmm0, r9d
pshufd xmm0, xmm0, 0x00
movdqa xmmword ptr [rsp+0x130], xmm0
movdqa xmm1, xmm0
pand xmm1, xmmword ptr [ADD0+rip]
pand xmm0, xmmword ptr [ADD1+rip]
movdqa xmmword ptr [rsp+0x150], xmm0
movd xmm0, r8d
pshufd xmm0, xmm0, 0x00
paddd xmm0, xmm1
movdqa xmmword ptr [rsp+0x110], xmm0
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm1, xmm0
shr r8, 32
movd xmm2, r8d
pshufd xmm2, xmm2, 0x00
psubd xmm2, xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
mov rbx, qword ptr [rbp+0x90]
mov r15, rdx
shl r15, 6
movzx r13d, byte ptr [rbp+0x78]
movzx r12d, byte ptr [rbp+0x88]
cmp rsi, 4
jc 3f
2:
movdqu xmm3, xmmword ptr [rcx]
pshufd xmm0, xmm3, 0x00
pshufd xmm1, xmm3, 0x55
pshufd xmm2, xmm3, 0xAA
pshufd xmm3, xmm3, 0xFF
movdqu xmm7, xmmword ptr [rcx+0x10]
pshufd xmm4, xmm7, 0x00
pshufd xmm5, xmm7, 0x55
pshufd xmm6, xmm7, 0xAA
pshufd xmm7, xmm7, 0xFF
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
mov r10, qword ptr [rdi+0x10]
mov r11, qword ptr [rdi+0x18]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
9:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
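/* Load one 64-byte block from each of the four inputs and transpose
 * 4x4 dword groups so each 16-byte slot at [rsp+0x00..0xF0] holds
 * the same message word across all four lanes. */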
movdqu xmm8, xmmword ptr [r8+rdx-0x40]
movdqu xmm9, xmmword ptr [r9+rdx-0x40]
movdqu xmm10, xmmword ptr [r10+rdx-0x40]
movdqu xmm11, xmmword ptr [r11+rdx-0x40]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp], xmm8
movdqa xmmword ptr [rsp+0x10], xmm9
movdqa xmmword ptr [rsp+0x20], xmm12
movdqa xmmword ptr [rsp+0x30], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x30]
movdqu xmm9, xmmword ptr [r9+rdx-0x30]
movdqu xmm10, xmmword ptr [r10+rdx-0x30]
movdqu xmm11, xmmword ptr [r11+rdx-0x30]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x40], xmm8
movdqa xmmword ptr [rsp+0x50], xmm9
movdqa xmmword ptr [rsp+0x60], xmm12
movdqa xmmword ptr [rsp+0x70], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x20]
movdqu xmm9, xmmword ptr [r9+rdx-0x20]
movdqu xmm10, xmmword ptr [r10+rdx-0x20]
movdqu xmm11, xmmword ptr [r11+rdx-0x20]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0x80], xmm8
movdqa xmmword ptr [rsp+0x90], xmm9
movdqa xmmword ptr [rsp+0xA0], xmm12
movdqa xmmword ptr [rsp+0xB0], xmm13
movdqu xmm8, xmmword ptr [r8+rdx-0x10]
movdqu xmm9, xmmword ptr [r9+rdx-0x10]
movdqu xmm10, xmmword ptr [r10+rdx-0x10]
movdqu xmm11, xmmword ptr [r11+rdx-0x10]
movdqa xmm12, xmm8
punpckldq xmm8, xmm9
punpckhdq xmm12, xmm9
movdqa xmm14, xmm10
punpckldq xmm10, xmm11
punpckhdq xmm14, xmm11
movdqa xmm9, xmm8
punpcklqdq xmm8, xmm10
punpckhqdq xmm9, xmm10
movdqa xmm13, xmm12
punpcklqdq xmm12, xmm14
punpckhqdq xmm13, xmm14
movdqa xmmword ptr [rsp+0xC0], xmm8
movdqa xmmword ptr [rsp+0xD0], xmm9
movdqa xmmword ptr [rsp+0xE0], xmm12
movdqa xmmword ptr [rsp+0xF0], xmm13
movdqa xmm9, xmmword ptr [BLAKE3_IV_1+rip]
movdqa xmm10, xmmword ptr [BLAKE3_IV_2+rip]
movdqa xmm11, xmmword ptr [BLAKE3_IV_3+rip]
movdqa xmm12, xmmword ptr [rsp+0x110]
movdqa xmm13, xmmword ptr [rsp+0x120]
movdqa xmm14, xmmword ptr [BLAKE3_BLOCK_LEN+rip]
movd xmm15, eax
pshufd xmm15, xmm15, 0x00
prefetcht0 [r8+rdx+0x80]
prefetcht0 [r9+rdx+0x80]
prefetcht0 [r10+rdx+0x80]
prefetcht0 [r11+rdx+0x80]
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
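/* The G function's 16-bit rotate is a pshufb with the ROT16 mask
 * (defined elsewhere in this file); the 12- and 7-bit rotates use
 * shift/or pairs. */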
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [BLAKE3_IV_0+rip]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x80]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x70]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x10]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0xD0]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x60]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xB0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x50]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0xE0]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x40]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x50]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xC0]
paddd xmm1, xmmword ptr [rsp+0x90]
paddd xmm2, xmmword ptr [rsp+0xF0]
paddd xmm3, xmmword ptr [rsp+0xE0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0xA0]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0x70]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x20]
paddd xmm1, xmmword ptr [rsp+0x30]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x90]
paddd xmm1, xmmword ptr [rsp+0xB0]
paddd xmm2, xmmword ptr [rsp+0x80]
paddd xmm3, xmmword ptr [rsp+0xF0]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0xC0]
paddd xmm3, xmmword ptr [rsp+0x10]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xD0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x20]
paddd xmm3, xmmword ptr [rsp+0x40]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0x30]
paddd xmm1, xmmword ptr [rsp+0xA0]
paddd xmm2, xmmword ptr [rsp+0x60]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xB0]
paddd xmm1, xmmword ptr [rsp+0x50]
paddd xmm2, xmmword ptr [rsp+0x10]
paddd xmm3, xmmword ptr [rsp+0x80]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xF0]
paddd xmm1, xmmword ptr [rsp]
paddd xmm2, xmmword ptr [rsp+0x90]
paddd xmm3, xmmword ptr [rsp+0x60]
paddd xmm0, xmm4
paddd xmm1, xmm5
paddd xmm2, xmm6
paddd xmm3, xmm7
pxor xmm12, xmm0
pxor xmm13, xmm1
pxor xmm14, xmm2
pxor xmm15, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
pshufb xmm15, xmm8
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm12
paddd xmm9, xmm13
paddd xmm10, xmm14
paddd xmm11, xmm15
pxor xmm4, xmm8
pxor xmm5, xmm9
pxor xmm6, xmm10
pxor xmm7, xmm11
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
paddd xmm0, xmmword ptr [rsp+0xE0]
paddd xmm1, xmmword ptr [rsp+0x20]
paddd xmm2, xmmword ptr [rsp+0x30]
paddd xmm3, xmmword ptr [rsp+0x70]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT16+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
movdqa xmmword ptr [rsp+0x100], xmm8
movdqa xmm8, xmm5
psrld xmm8, 12
pslld xmm5, 20
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 12
pslld xmm6, 20
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 12
pslld xmm7, 20
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 12
pslld xmm4, 20
por xmm4, xmm8
paddd xmm0, xmmword ptr [rsp+0xA0]
paddd xmm1, xmmword ptr [rsp+0xC0]
paddd xmm2, xmmword ptr [rsp+0x40]
paddd xmm3, xmmword ptr [rsp+0xD0]
paddd xmm0, xmm5
paddd xmm1, xmm6
paddd xmm2, xmm7
paddd xmm3, xmm4
pxor xmm15, xmm0
pxor xmm12, xmm1
pxor xmm13, xmm2
pxor xmm14, xmm3
movdqa xmm8, xmmword ptr [ROT8+rip]
pshufb xmm15, xmm8
pshufb xmm12, xmm8
pshufb xmm13, xmm8
pshufb xmm14, xmm8
paddd xmm10, xmm15
paddd xmm11, xmm12
movdqa xmm8, xmmword ptr [rsp+0x100]
paddd xmm8, xmm13
paddd xmm9, xmm14
pxor xmm5, xmm10
pxor xmm6, xmm11
pxor xmm7, xmm8
pxor xmm4, xmm9
pxor xmm0, xmm8
pxor xmm1, xmm9
pxor xmm2, xmm10
pxor xmm3, xmm11
movdqa xmm8, xmm5
psrld xmm8, 7
pslld xmm5, 25
por xmm5, xmm8
movdqa xmm8, xmm6
psrld xmm8, 7
pslld xmm6, 25
por xmm6, xmm8
movdqa xmm8, xmm7
psrld xmm8, 7
pslld xmm7, 25
por xmm7, xmm8
movdqa xmm8, xmm4
psrld xmm8, 7
pslld xmm4, 25
por xmm4, xmm8
pxor xmm4, xmm12
pxor xmm5, xmm13
pxor xmm6, xmm14
pxor xmm7, xmm15
mov eax, r13d
jne 9b
movdqa xmm9, xmm0
punpckldq xmm0, xmm1
punpckhdq xmm9, xmm1
movdqa xmm11, xmm2
punpckldq xmm2, xmm3
punpckhdq xmm11, xmm3
movdqa xmm1, xmm0
punpcklqdq xmm0, xmm2
punpckhqdq xmm1, xmm2
movdqa xmm3, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm3, xmm11
movdqu xmmword ptr [rbx], xmm0
movdqu xmmword ptr [rbx+0x20], xmm1
movdqu xmmword ptr [rbx+0x40], xmm9
movdqu xmmword ptr [rbx+0x60], xmm3
movdqa xmm9, xmm4
punpckldq xmm4, xmm5
punpckhdq xmm9, xmm5
movdqa xmm11, xmm6
punpckldq xmm6, xmm7
punpckhdq xmm11, xmm7
movdqa xmm5, xmm4
punpcklqdq xmm4, xmm6
punpckhqdq xmm5, xmm6
movdqa xmm7, xmm9
punpcklqdq xmm9, xmm11
punpckhqdq xmm7, xmm11
movdqu xmmword ptr [rbx+0x10], xmm4
movdqu xmmword ptr [rbx+0x30], xmm5
movdqu xmmword ptr [rbx+0x50], xmm9
movdqu xmmword ptr [rbx+0x70], xmm7
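# The two punpckldq/punpckhdq/punpcklqdq blocks above are 4x4 32-bit
# transposes: xmm0-xmm7 hold the eight state words with one input per
# dword lane, and the transpose regroups them so each input's 32-byte
# chaining value lands contiguously at rbx, rbx+0x20, rbx+0x40, rbx+0x60.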
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm0, xmm1
paddd xmm1, xmmword ptr [rsp+0x150]
movdqa xmmword ptr [rsp+0x110], xmm1
pxor xmm0, xmmword ptr [CMP_MSB_MASK+rip]
pxor xmm1, xmmword ptr [CMP_MSB_MASK+rip]
pcmpgtd xmm0, xmm1
movdqa xmm1, xmmword ptr [rsp+0x120]
psubd xmm1, xmm0
movdqa xmmword ptr [rsp+0x120], xmm1
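# SSE4.1 has no unsigned dword compare, so the carry out of the low
# counter words is detected by flipping the sign bit of both the old and
# new values (pxor with CMP_MSB_MASK) and doing a signed pcmpgtd; psubd
# then subtracts the resulting all-ones mask, i.e. adds 1 to the high
# counter words of the lanes that overflowed.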
add rbx, 128
add rdi, 32
sub rsi, 4
cmp rsi, 4
jnc 2b
test rsi, rsi
jne 3f
4:
movdqa xmm6, xmmword ptr [rsp+0x170]
movdqa xmm7, xmmword ptr [rsp+0x180]
movdqa xmm8, xmmword ptr [rsp+0x190]
movdqa xmm9, xmmword ptr [rsp+0x1A0]
movdqa xmm10, xmmword ptr [rsp+0x1B0]
movdqa xmm11, xmmword ptr [rsp+0x1C0]
movdqa xmm12, xmmword ptr [rsp+0x1D0]
movdqa xmm13, xmmword ptr [rsp+0x1E0]
movdqa xmm14, xmmword ptr [rsp+0x1F0]
movdqa xmm15, xmmword ptr [rsp+0x200]
mov rsp, rbp
pop rbp
pop rbx
pop rdi
pop rsi
pop r12
pop r13
pop r14
pop r15
ret
.p2align 5
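# Remainder handling: the first label 3 below compresses a final pair of
# inputs with two interleaved single-block states, the second label 3
# handles one last input, and label 4 above restores the nonvolatile
# xmm6-xmm15 registers and returns.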
3:
test esi, 0x2
je 3f
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm8, xmm0
movaps xmm9, xmm1
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp], xmm13
movd xmm14, dword ptr [rsp+0x114]
pinsrd xmm14, dword ptr [rsp+0x124], 1
pinsrd xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmmword ptr [rsp+0x10], xmm14
mov r8, qword ptr [rdi]
mov r9, qword ptr [rdi+0x8]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm10, xmm2
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm3, xmm4
shufps xmm4, xmm5, 136
shufps xmm3, xmm5, 221
movaps xmm5, xmm3
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm3, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm3, xmm7, 221
pshufd xmm7, xmm3, 0x93
movups xmm12, xmmword ptr [r9+rdx-0x40]
movups xmm13, xmmword ptr [r9+rdx-0x30]
movaps xmm11, xmm12
shufps xmm12, xmm13, 136
shufps xmm11, xmm13, 221
movaps xmm13, xmm11
movups xmm14, xmmword ptr [r9+rdx-0x20]
movups xmm15, xmmword ptr [r9+rdx-0x10]
movaps xmm11, xmm14
shufps xmm14, xmm15, 136
pshufd xmm14, xmm14, 0x93
shufps xmm11, xmm15, 221
pshufd xmm15, xmm11, 0x93
movaps xmm3, xmmword ptr [rsp]
movaps xmm11, xmmword ptr [rsp+0x10]
pinsrd xmm3, eax, 3
pinsrd xmm11, eax, 3
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm8, xmm12
movaps xmmword ptr [rsp+0x20], xmm4
movaps xmmword ptr [rsp+0x30], xmm12
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm12, xmmword ptr [ROT16+rip]
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm5
paddd xmm8, xmm13
movaps xmmword ptr [rsp+0x40], xmm5
movaps xmmword ptr [rsp+0x50], xmm13
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
movaps xmm13, xmmword ptr [ROT8+rip]
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x93
pshufd xmm8, xmm8, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x39
pshufd xmm10, xmm10, 0x39
paddd xmm0, xmm6
paddd xmm8, xmm14
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm12
pshufb xmm11, xmm12
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 20
psrld xmm4, 12
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 20
psrld xmm4, 12
por xmm9, xmm4
paddd xmm0, xmm7
paddd xmm8, xmm15
paddd xmm0, xmm1
paddd xmm8, xmm9
pxor xmm3, xmm0
pxor xmm11, xmm8
pshufb xmm3, xmm13
pshufb xmm11, xmm13
paddd xmm2, xmm3
paddd xmm10, xmm11
pxor xmm1, xmm2
pxor xmm9, xmm10
movdqa xmm4, xmm1
pslld xmm1, 25
psrld xmm4, 7
por xmm1, xmm4
movdqa xmm4, xmm9
pslld xmm9, 25
psrld xmm4, 7
por xmm9, xmm4
pshufd xmm0, xmm0, 0x39
pshufd xmm8, xmm8, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm11, xmm11, 0x4E
pshufd xmm2, xmm2, 0x93
pshufd xmm10, xmm10, 0x93
dec al
je 9f
movdqa xmm12, xmmword ptr [rsp+0x20]
movdqa xmm5, xmmword ptr [rsp+0x40]
pshufd xmm13, xmm12, 0x0F
shufps xmm12, xmm5, 214
pshufd xmm4, xmm12, 0x39
movdqa xmm12, xmm6
shufps xmm12, xmm7, 250
pblendw xmm13, xmm12, 0xCC
movdqa xmm12, xmm7
punpcklqdq xmm12, xmm5
pblendw xmm12, xmm6, 0xC0
pshufd xmm12, xmm12, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmmword ptr [rsp+0x20], xmm13
movdqa xmmword ptr [rsp+0x40], xmm12
movdqa xmm5, xmmword ptr [rsp+0x30]
movdqa xmm13, xmmword ptr [rsp+0x50]
pshufd xmm6, xmm5, 0x0F
shufps xmm5, xmm13, 214
pshufd xmm12, xmm5, 0x39
movdqa xmm5, xmm14
shufps xmm5, xmm15, 250
pblendw xmm6, xmm5, 0xCC
movdqa xmm5, xmm15
punpcklqdq xmm5, xmm13
pblendw xmm5, xmm14, 0xC0
pshufd xmm5, xmm5, 0x78
punpckhdq xmm13, xmm15
punpckldq xmm14, xmm13
pshufd xmm15, xmm14, 0x1E
movdqa xmm13, xmm6
movdqa xmm14, xmm5
movdqa xmm5, xmmword ptr [rsp+0x20]
movdqa xmm6, xmmword ptr [rsp+0x40]
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm8, xmm10
pxor xmm9, xmm11
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
movups xmmword ptr [rbx+0x20], xmm8
movups xmmword ptr [rbx+0x30], xmm9
movdqa xmm0, xmmword ptr [rsp+0x130]
movdqa xmm1, xmmword ptr [rsp+0x110]
movdqa xmm2, xmmword ptr [rsp+0x120]
movdqu xmm3, xmmword ptr [rsp+0x118]
movdqu xmm4, xmmword ptr [rsp+0x128]
blendvps xmm1, xmm3, xmm0
blendvps xmm2, xmm4, xmm0
movdqa xmmword ptr [rsp+0x110], xmm1
movdqa xmmword ptr [rsp+0x120], xmm2
add rdi, 16
add rbx, 64
sub rsi, 2
3:
test esi, 0x1
je 4b
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movd xmm13, dword ptr [rsp+0x110]
pinsrd xmm13, dword ptr [rsp+0x120], 1
pinsrd xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov r8, qword ptr [rdi]
movzx eax, byte ptr [rbp+0x80]
or eax, r13d
xor edx, edx
2:
mov r14d, eax
or eax, r12d
add rdx, 64
cmp rdx, r15
cmovne eax, r14d
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movaps xmm3, xmm13
pinsrd xmm3, eax, 3
movups xmm4, xmmword ptr [r8+rdx-0x40]
movups xmm5, xmmword ptr [r8+rdx-0x30]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [r8+rdx-0x20]
movups xmm7, xmmword ptr [r8+rdx-0x10]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
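# The shufps/pblendw/pshufd block below applies BLAKE3's fixed message
# permutation to the four message vectors in place, so each of the seven
# rounds (mov al, 7 above) can reuse the same scheduling code.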
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
mov eax, r13d
cmp rdx, r15
jne 2b
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+0x10], xmm1
jmp 4b
.p2align 6
blake3_compress_in_place_sse41:
_blake3_compress_in_place_sse41:
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+0x10], xmm7
movdqa xmmword ptr [rsp+0x20], xmm8
movdqa xmmword ptr [rsp+0x30], xmm9
movdqa xmmword ptr [rsp+0x40], xmm11
movdqa xmmword ptr [rsp+0x50], xmm14
movdqa xmmword ptr [rsp+0x60], xmm15
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, byte ptr [rsp+0xA0]
movzx r8d, r8b
shl rax, 32
add r8, rax
movq xmm3, r9
movq xmm4, r8
punpcklqdq xmm3, xmm4
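# xmm3 now holds the final state row [counter_lo, counter_hi, block_len,
# flags]; per the Windows x64 convention used in this file, rcx = cv,
# rdx = block, r8b = block_len, r9 = counter, and flags arrive on the
# stack.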
movups xmm4, xmmword ptr [rdx]
movups xmm5, xmmword ptr [rdx+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rdx+0x20]
movups xmm7, xmmword ptr [rdx+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
pxor xmm0, xmm2
pxor xmm1, xmm3
movups xmmword ptr [rcx], xmm0
movups xmmword ptr [rcx+0x10], xmm1
movdqa xmm6, xmmword ptr [rsp]
movdqa xmm7, xmmword ptr [rsp+0x10]
movdqa xmm8, xmmword ptr [rsp+0x20]
movdqa xmm9, xmmword ptr [rsp+0x30]
movdqa xmm11, xmmword ptr [rsp+0x40]
movdqa xmm14, xmmword ptr [rsp+0x50]
movdqa xmm15, xmmword ptr [rsp+0x60]
add rsp, 120
ret
.p2align 6
_blake3_compress_xof_sse41:
blake3_compress_xof_sse41:
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+0x10], xmm7
movdqa xmmword ptr [rsp+0x20], xmm8
movdqa xmmword ptr [rsp+0x30], xmm9
movdqa xmmword ptr [rsp+0x40], xmm11
movdqa xmmword ptr [rsp+0x50], xmm14
movdqa xmmword ptr [rsp+0x60], xmm15
movups xmm0, xmmword ptr [rcx]
movups xmm1, xmmword ptr [rcx+0x10]
movaps xmm2, xmmword ptr [BLAKE3_IV+rip]
movzx eax, byte ptr [rsp+0xA0]
movzx r8d, r8b
mov r10, qword ptr [rsp+0xA8]
shl rax, 32
add r8, rax
movq xmm3, r9
movq xmm4, r8
punpcklqdq xmm3, xmm4
movups xmm4, xmmword ptr [rdx]
movups xmm5, xmmword ptr [rdx+0x10]
movaps xmm8, xmm4
shufps xmm4, xmm5, 136
shufps xmm8, xmm5, 221
movaps xmm5, xmm8
movups xmm6, xmmword ptr [rdx+0x20]
movups xmm7, xmmword ptr [rdx+0x30]
movaps xmm8, xmm6
shufps xmm6, xmm7, 136
pshufd xmm6, xmm6, 0x93
shufps xmm8, xmm7, 221
pshufd xmm7, xmm8, 0x93
movaps xmm14, xmmword ptr [ROT8+rip]
movaps xmm15, xmmword ptr [ROT16+rip]
mov al, 7
9:
paddd xmm0, xmm4
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm5
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x93
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x39
paddd xmm0, xmm6
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm15
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 20
psrld xmm11, 12
por xmm1, xmm11
paddd xmm0, xmm7
paddd xmm0, xmm1
pxor xmm3, xmm0
pshufb xmm3, xmm14
paddd xmm2, xmm3
pxor xmm1, xmm2
movdqa xmm11, xmm1
pslld xmm1, 25
psrld xmm11, 7
por xmm1, xmm11
pshufd xmm0, xmm0, 0x39
pshufd xmm3, xmm3, 0x4E
pshufd xmm2, xmm2, 0x93
dec al
jz 9f
movdqa xmm8, xmm4
shufps xmm8, xmm5, 214
pshufd xmm9, xmm4, 0x0F
pshufd xmm4, xmm8, 0x39
movdqa xmm8, xmm6
shufps xmm8, xmm7, 250
pblendw xmm9, xmm8, 0xCC
movdqa xmm8, xmm7
punpcklqdq xmm8, xmm5
pblendw xmm8, xmm6, 0xC0
pshufd xmm8, xmm8, 0x78
punpckhdq xmm5, xmm7
punpckldq xmm6, xmm5
pshufd xmm7, xmm6, 0x1E
movdqa xmm5, xmm9
movdqa xmm6, xmm8
jmp 9b
9:
movdqu xmm4, xmmword ptr [rcx]
movdqu xmm5, xmmword ptr [rcx+0x10]
pxor xmm0, xmm2
pxor xmm1, xmm3
pxor xmm2, xmm4
pxor xmm3, xmm5
movups xmmword ptr [r10], xmm0
movups xmmword ptr [r10+0x10], xmm1
movups xmmword ptr [r10+0x20], xmm2
movups xmmword ptr [r10+0x30], xmm3
movdqa xmm6, xmmword ptr [rsp]
movdqa xmm7, xmmword ptr [rsp+0x10]
movdqa xmm8, xmmword ptr [rsp+0x20]
movdqa xmm9, xmmword ptr [rsp+0x30]
movdqa xmm11, xmmword ptr [rsp+0x40]
movdqa xmm14, xmmword ptr [rsp+0x50]
movdqa xmm15, xmmword ptr [rsp+0x60]
add rsp, 120
ret
.section .rodata
.p2align 6
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85
.long 0x3C6EF372, 0xA54FF53A
ROT16:
.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
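# ROT16 and ROT8 are pshufb byte-shuffle masks that rotate every 32-bit
# lane right by 16 and 8 bits; a single pshufb replaces the three-op
# psrld/pslld/por sequence that the non-byte-aligned rotates (12 and 7)
# still require.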
ADD0:
.long 0, 1, 2, 3
ADD1:
.long 4, 4, 4, 4
BLAKE3_IV_0:
.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
.long 64, 64, 64, 64
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
| aegean-odyssey/mpmd_marlin_1.1.x | 11,394 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_Interrupt/MDK-ARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
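; For example, a project with deeper call chains could enlarge the
; reservation above (an illustrative value, not part of this example
; project):
;   Stack_Size      EQU     0x800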
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
| aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_Interrupt/SW4STM32/startup_stm32f072xb.s |
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
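/* These symbols come from the linker script; a typical (simplified)
   definition looks like:

     .data : { _sdata = .; *(.data*); _edata = .; } >RAM AT>FLASH
     _sidata = LOADADDR(.data);
     .bss  : { _sbss = .; *(.bss*) *(COMMON); _ebss = .; } >RAM

   The exact script is provided with the example project. */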
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
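/* For example, an application overrides a handler simply by defining a
   function with the same name in C; no edits to this file are needed:

     void SysTick_Handler(void)
     {
       HAL_IncTick();    (illustrative body; any application code works)
     }
*/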
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| aenu1/aps3e | 439,413 | app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/sha3_asm.S |
/* sha3_asm.S */
/*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
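/*
 * As an illustration (hypothetical input, not shipped with wolfSSL): a
 * user_settings.h containing
 *     #define WOLFSSL_SHA3
 *     typedef unsigned int my_word;
 * is reduced by the script to a user_settings_asm.h containing only
 *     #define WOLFSSL_SHA3
 * so that no C syntax ever reaches the assembler.
 */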
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_sha3_avx2_r:
.quad 0x1,0x1
.quad 0x1,0x1
.quad 0x8082,0x8082
.quad 0x8082,0x8082
.quad 0x800000000000808a,0x800000000000808a
.quad 0x800000000000808a,0x800000000000808a
.quad 0x8000000080008000,0x8000000080008000
.quad 0x8000000080008000,0x8000000080008000
.quad 0x808b,0x808b
.quad 0x808b,0x808b
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8a,0x8a
.quad 0x8a,0x8a
.quad 0x88,0x88
.quad 0x88,0x88
.quad 0x80008009,0x80008009
.quad 0x80008009,0x80008009
.quad 0x8000000a,0x8000000a
.quad 0x8000000a,0x8000000a
.quad 0x8000808b,0x8000808b
.quad 0x8000808b,0x8000808b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000000080,0x8000000000000080
.quad 0x8000000000000080,0x8000000000000080
.quad 0x800a,0x800a
.quad 0x800a,0x800a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008080,0x8000000000008080
.quad 0x8000000000008080,0x8000000000008080
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008008,0x8000000080008008
.quad 0x8000000080008008,0x8000000080008008
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_sha3_x4_avx2_r:
.quad 0x1,0x1
.quad 0x1,0x1
.quad 0x8082,0x8082
.quad 0x8082,0x8082
.quad 0x800000000000808a,0x800000000000808a
.quad 0x800000000000808a,0x800000000000808a
.quad 0x8000000080008000,0x8000000080008000
.quad 0x8000000080008000,0x8000000080008000
.quad 0x808b,0x808b
.quad 0x808b,0x808b
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8a,0x8a
.quad 0x8a,0x8a
.quad 0x88,0x88
.quad 0x88,0x88
.quad 0x80008009,0x80008009
.quad 0x80008009,0x80008009
.quad 0x8000000a,0x8000000a
.quad 0x8000000a,0x8000000a
.quad 0x8000808b,0x8000808b
.quad 0x8000808b,0x8000808b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000000080,0x8000000000000080
.quad 0x8000000000000080,0x8000000000000080
.quad 0x800a,0x800a
.quad 0x800a,0x800a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008080,0x8000000000008080
.quad 0x8000000000008080,0x8000000000008080
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008008,0x8000000080008008
.quad 0x8000000080008008,0x8000000080008008
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.text
.globl sha3_block_bmi2
.type sha3_block_bmi2,@function
.align 16
sha3_block_bmi2:
#else
.section __TEXT,__text
.globl _sha3_block_bmi2
.p2align 4
_sha3_block_bmi2:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq (%rdi), %rsi
addq $0x60, %rdi
# Round 0
movq %rsi, %r10
movq -88(%rdi), %r11
movq -80(%rdi), %r12
movq -72(%rdi), %r13
movq -64(%rdi), %r14
xorq -56(%rdi), %r10
xorq -48(%rdi), %r11
xorq -40(%rdi), %r12
xorq -32(%rdi), %r13
xorq -24(%rdi), %r14
xorq -16(%rdi), %r10
xorq -8(%rdi), %r11
xorq (%rdi), %r12
xorq 8(%rdi), %r13
xorq 16(%rdi), %r14
xorq 24(%rdi), %r10
xorq 32(%rdi), %r11
xorq 40(%rdi), %r12
xorq 48(%rdi), %r13
xorq 56(%rdi), %r14
xorq 64(%rdi), %r10
xorq 72(%rdi), %r11
xorq 80(%rdi), %r12
xorq 88(%rdi), %r13
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
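# This is the Keccak-f[1600] theta step: r10-r14 hold the five column
# parities C[0..4], rorxq $63 is a rotate-left-by-1, and each t value
# (D in the spec) is t[x] = C[x-1] ^ rotl64(C[x+1], 1).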
# Row Mix
# Row 0
movq %rsi, %r10
movq -48(%rdi), %r11
movq (%rdi), %r12
movq 48(%rdi), %r13
movq 96(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, (%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 96(%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq $0x01, %rsi
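# The andnq/xorq pairs above are the Keccak chi step,
# out[x] = b[x] ^ (~b[x+1] & b[x+2]), applied to the rho/pi-rotated row,
# and the final xorq folds in the round-0 iota constant (0x01).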
# Row 1
movq -72(%rdi), %r10
movq -24(%rdi), %r11
movq -16(%rdi), %r12
movq 32(%rdi), %r13
movq 80(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -72(%rdi)
# Row 2
movq -88(%rdi), %r10
movq -40(%rdi), %r11
movq 8(%rdi), %r12
movq 56(%rdi), %r13
movq 64(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -88(%rdi)
# Row 3
movq -64(%rdi), %r10
movq -56(%rdi), %r11
movq -8(%rdi), %r12
movq 40(%rdi), %r13
movq 88(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -64(%rdi)
# Row 4
xorq -80(%rdi), %rcx
xorq -32(%rdi), %r8
xorq 16(%rdi), %r9
xorq 24(%rdi), %rdx
xorq 72(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -80(%rdi)
movq %r11, -32(%rdi)
movq %r12, 16(%rdi)
movq %r13, 24(%rdi)
movq %r14, 72(%rdi)
# Round 1
xorq %rsi, %r10
xorq -88(%rdi), %r10
xorq -72(%rdi), %r10
xorq -64(%rdi), %r10
xorq -56(%rdi), %r11
xorq -48(%rdi), %r11
xorq -40(%rdi), %r11
xorq -24(%rdi), %r11
xorq -16(%rdi), %r12
xorq -8(%rdi), %r12
xorq (%rdi), %r12
xorq 8(%rdi), %r12
xorq 32(%rdi), %r13
xorq 40(%rdi), %r13
xorq 48(%rdi), %r13
xorq 56(%rdi), %r13
xorq 64(%rdi), %r14
xorq 80(%rdi), %r14
xorq 88(%rdi), %r14
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -24(%rdi), %r11
movq 8(%rdi), %r12
movq 40(%rdi), %r13
movq 72(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 72(%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq $0x8082, %rsi
# Row 1
movq 48(%rdi), %r10
movq 80(%rdi), %r11
movq -88(%rdi), %r12
movq -56(%rdi), %r13
movq 16(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 48(%rdi)
# Row 2
movq -48(%rdi), %r10
movq -16(%rdi), %r11
movq 56(%rdi), %r12
movq 88(%rdi), %r13
movq -80(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -48(%rdi)
# Row 3
movq 96(%rdi), %r10
movq -72(%rdi), %r11
movq -40(%rdi), %r12
movq -8(%rdi), %r13
movq 24(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 96(%rdi)
# Row 4
xorq (%rdi), %rcx
xorq 32(%rdi), %r8
xorq 64(%rdi), %r9
xorq -64(%rdi), %rdx
xorq -32(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, (%rdi)
movq %r11, 32(%rdi)
movq %r12, 64(%rdi)
movq %r13, -64(%rdi)
movq %r14, -32(%rdi)
# Round 2
xorq %rsi, %r10
xorq -88(%rdi), %r12
xorq -80(%rdi), %r14
xorq -72(%rdi), %r11
xorq -56(%rdi), %r13
xorq -48(%rdi), %r10
xorq -40(%rdi), %r12
xorq -24(%rdi), %r11
xorq -16(%rdi), %r11
xorq -8(%rdi), %r13
xorq 8(%rdi), %r12
xorq 16(%rdi), %r14
xorq 24(%rdi), %r14
xorq 40(%rdi), %r13
xorq 48(%rdi), %r10
xorq 56(%rdi), %r12
xorq 72(%rdi), %r14
xorq 80(%rdi), %r11
xorq 88(%rdi), %r13
xorq 96(%rdi), %r10
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 80(%rdi), %r11
movq 56(%rdi), %r12
movq -8(%rdi), %r13
movq -32(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -32(%rdi)
movq $0x800000000000808a, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq %r14, %rsi
# Row 1
movq 40(%rdi), %r10
movq 16(%rdi), %r11
movq -48(%rdi), %r12
movq -72(%rdi), %r13
movq 64(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 40(%rdi)
# Row 2
movq -24(%rdi), %r10
movq -88(%rdi), %r11
movq 88(%rdi), %r12
movq 24(%rdi), %r13
movq (%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, (%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -24(%rdi)
# Row 3
movq 72(%rdi), %r10
movq 48(%rdi), %r11
movq -16(%rdi), %r12
movq -40(%rdi), %r13
movq -64(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 72(%rdi)
# Row 4
xorq 8(%rdi), %rcx
xorq -56(%rdi), %r8
xorq -80(%rdi), %r9
xorq 96(%rdi), %rdx
xorq 32(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 8(%rdi)
movq %r11, -56(%rdi)
movq %r12, -80(%rdi)
movq %r13, 96(%rdi)
movq %r14, 32(%rdi)
# Round 3
xorq %rsi, %r10
xorq -88(%rdi), %r11
xorq -72(%rdi), %r13
xorq -64(%rdi), %r14
xorq -48(%rdi), %r12
xorq -40(%rdi), %r13
xorq -32(%rdi), %r14
xorq -24(%rdi), %r10
xorq -16(%rdi), %r12
xorq -8(%rdi), %r13
xorq (%rdi), %r14
xorq 16(%rdi), %r11
xorq 24(%rdi), %r13
xorq 40(%rdi), %r10
xorq 48(%rdi), %r11
xorq 56(%rdi), %r12
xorq 64(%rdi), %r14
xorq 72(%rdi), %r10
xorq 80(%rdi), %r11
xorq 88(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 16(%rdi), %r11
movq 88(%rdi), %r12
movq -40(%rdi), %r13
movq 32(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 32(%rdi)
movq $0x8000000080008000, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq %r14, %rsi
# Row 1
movq -8(%rdi), %r10
movq 64(%rdi), %r11
movq -24(%rdi), %r12
movq 48(%rdi), %r13
movq -80(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -8(%rdi)
# Row 2
movq 80(%rdi), %r10
movq -48(%rdi), %r11
movq 24(%rdi), %r12
movq -64(%rdi), %r13
movq 8(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 80(%rdi)
# Row 3
movq -32(%rdi), %r10
movq 40(%rdi), %r11
movq -88(%rdi), %r12
movq -16(%rdi), %r13
movq 96(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 96(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -32(%rdi)
# Row 4
xorq 56(%rdi), %rcx
xorq -72(%rdi), %r8
xorq (%rdi), %r9
xorq 72(%rdi), %rdx
xorq -56(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 56(%rdi)
movq %r11, -72(%rdi)
movq %r12, (%rdi)
movq %r13, 72(%rdi)
movq %r14, -56(%rdi)
# Round 4
xorq %rsi, %r10
xorq -88(%rdi), %r12
xorq -80(%rdi), %r14
xorq -64(%rdi), %r13
xorq -48(%rdi), %r11
xorq -40(%rdi), %r13
xorq -32(%rdi), %r10
xorq -24(%rdi), %r12
xorq -16(%rdi), %r13
xorq -8(%rdi), %r10
xorq 8(%rdi), %r14
xorq 16(%rdi), %r11
xorq 24(%rdi), %r12
xorq 32(%rdi), %r14
xorq 40(%rdi), %r11
xorq 48(%rdi), %r13
xorq 64(%rdi), %r11
xorq 80(%rdi), %r10
xorq 88(%rdi), %r12
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 64(%rdi), %r11
movq 24(%rdi), %r12
movq -16(%rdi), %r13
movq -56(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -56(%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq $0x808b, %rsi
# Row 1
movq -40(%rdi), %r10
movq -80(%rdi), %r11
movq 80(%rdi), %r12
movq 40(%rdi), %r13
movq (%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, (%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -40(%rdi)
# Row 2
movq 16(%rdi), %r10
movq -24(%rdi), %r11
movq -64(%rdi), %r12
movq 96(%rdi), %r13
movq 56(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 96(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 16(%rdi)
# Row 3
movq 32(%rdi), %r10
movq -8(%rdi), %r11
movq -48(%rdi), %r12
movq -88(%rdi), %r13
movq 72(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 32(%rdi)
# Row 4
xorq 88(%rdi), %rcx
xorq 48(%rdi), %r8
xorq 8(%rdi), %r9
xorq -32(%rdi), %rdx
xorq -72(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 88(%rdi)
movq %r11, 48(%rdi)
movq %r12, 8(%rdi)
movq %r13, -32(%rdi)
movq %r14, -72(%rdi)
# Round 5
xorq %rsi, %r10
xorq -88(%rdi), %r13
xorq -80(%rdi), %r11
xorq -64(%rdi), %r12
xorq -56(%rdi), %r14
xorq -48(%rdi), %r12
xorq -40(%rdi), %r10
xorq -24(%rdi), %r11
xorq -16(%rdi), %r13
xorq -8(%rdi), %r11
xorq (%rdi), %r14
xorq 16(%rdi), %r10
xorq 24(%rdi), %r12
xorq 32(%rdi), %r10
xorq 40(%rdi), %r13
xorq 56(%rdi), %r14
xorq 64(%rdi), %r11
xorq 72(%rdi), %r14
xorq 80(%rdi), %r12
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -80(%rdi), %r11
movq -64(%rdi), %r12
movq -88(%rdi), %r13
movq -72(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
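# rho: 44/43/21/14 are the rotation offsets of the lanes pi maps
# into row 0; lane (0,0) has rho offset 0, so %r10 is not rotated
# (rows 1-3 below use their own fixed offset sets)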
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
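# chi: A'[x] = B[x] ^ (~B[x+1] & B[x+2]); in AT&T syntax
# "andnq %rB, %rA, %rD" computes %rD = ~%rA & %rB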
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -72(%rdi)
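# round constants that do not fit a sign-extended 32-bit
# immediate are staged through %r14, whose lane value is dead by
# this point; small ones (0x808b, 0x8a, ...) are XORed directly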
movq $0x80000001, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[5]
xorq %r14, %rsi
# Row 1
movq -16(%rdi), %r10
movq (%rdi), %r11
movq 16(%rdi), %r12
movq -8(%rdi), %r13
movq 8(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, (%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -16(%rdi)
# Row 2
movq 64(%rdi), %r10
movq 80(%rdi), %r11
movq 96(%rdi), %r12
movq 72(%rdi), %r13
movq 88(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 96(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 64(%rdi)
# Row 3
movq -56(%rdi), %r10
movq -40(%rdi), %r11
movq -24(%rdi), %r12
movq -48(%rdi), %r13
movq -32(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -56(%rdi)
# Row 4
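# Row 4 folds its lanes straight into the D registers, rotates
# with rorxq (rotate right by 64-r == rotate left by r; rho
# offsets 62/55/39/41/2) and keeps all five chi terms in
# registers, so no %r15 spill is needed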
xorq 24(%rdi), %rcx
xorq 40(%rdi), %r8
xorq 56(%rdi), %r9
xorq 32(%rdi), %rdx
xorq 48(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 24(%rdi)
movq %r11, 40(%rdi)
movq %r12, 56(%rdi)
movq %r13, 32(%rdi)
movq %r14, 48(%rdi)
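# Rounds 6..23 repeat the identical theta / rho-pi-chi / iota
# schedule; pi is folded into the addressing rather than moving
# data, so only the lane offsets and the round constant change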
# Round 6
xorq %rsi, %r10
xorq -88(%rdi), %r13
xorq -80(%rdi), %r11
xorq -72(%rdi), %r14
xorq -64(%rdi), %r12
xorq -56(%rdi), %r10
xorq -48(%rdi), %r13
xorq -40(%rdi), %r11
xorq -32(%rdi), %r14
xorq -24(%rdi), %r12
xorq -16(%rdi), %r10
xorq -8(%rdi), %r13
xorq (%rdi), %r11
xorq 8(%rdi), %r14
xorq 16(%rdi), %r12
xorq 64(%rdi), %r10
xorq 72(%rdi), %r13
xorq 80(%rdi), %r11
xorq 88(%rdi), %r14
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq (%rdi), %r11
movq 96(%rdi), %r12
movq -48(%rdi), %r13
movq 48(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, (%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 96(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 48(%rdi)
movq $0x8000000080008081, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[6]
xorq %r14, %rsi
# Row 1
movq -88(%rdi), %r10
movq 8(%rdi), %r11
movq 64(%rdi), %r12
movq -40(%rdi), %r13
movq 56(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -88(%rdi)
# Row 2
movq -80(%rdi), %r10
movq 16(%rdi), %r11
movq 72(%rdi), %r12
movq -32(%rdi), %r13
movq 24(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -80(%rdi)
# Row 3
movq -72(%rdi), %r10
movq -16(%rdi), %r11
movq 80(%rdi), %r12
movq -24(%rdi), %r13
movq 32(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -72(%rdi)
# Row 4
xorq -64(%rdi), %rcx
xorq -8(%rdi), %r8
xorq 88(%rdi), %r9
xorq -56(%rdi), %rdx
xorq 40(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -64(%rdi)
movq %r11, -8(%rdi)
movq %r12, 88(%rdi)
movq %r13, -56(%rdi)
movq %r14, 40(%rdi)
# Round 7
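# the parity XORs appear to be emitted in ascending memory-offset
# order, so the destination registers interleave differently each
# round, tracking wherever pi left each column's lanes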
xorq %rsi, %r10
xorq -88(%rdi), %r10
xorq -80(%rdi), %r10
xorq -72(%rdi), %r10
xorq -48(%rdi), %r13
xorq -40(%rdi), %r13
xorq -32(%rdi), %r13
xorq -24(%rdi), %r13
xorq -16(%rdi), %r11
xorq (%rdi), %r11
xorq 8(%rdi), %r11
xorq 16(%rdi), %r11
xorq 24(%rdi), %r14
xorq 32(%rdi), %r14
xorq 48(%rdi), %r14
xorq 56(%rdi), %r14
xorq 64(%rdi), %r12
xorq 72(%rdi), %r12
xorq 80(%rdi), %r12
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 8(%rdi), %r11
movq 72(%rdi), %r12
movq -24(%rdi), %r13
movq 40(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 40(%rdi)
movq $0x8000000000008009, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[7]
xorq %r14, %rsi
# Row 1
movq -48(%rdi), %r10
movq 56(%rdi), %r11
movq -80(%rdi), %r12
movq -16(%rdi), %r13
movq 88(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -48(%rdi)
# Row 2
movq (%rdi), %r10
movq 64(%rdi), %r11
movq -32(%rdi), %r12
movq 32(%rdi), %r13
movq -64(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, (%rdi)
# Row 3
movq 48(%rdi), %r10
movq -88(%rdi), %r11
movq 16(%rdi), %r12
movq 80(%rdi), %r13
movq -56(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 48(%rdi)
# Row 4
xorq 96(%rdi), %rcx
xorq -40(%rdi), %r8
xorq 24(%rdi), %r9
xorq -72(%rdi), %rdx
xorq -8(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 96(%rdi)
movq %r11, -40(%rdi)
movq %r12, 24(%rdi)
movq %r13, -72(%rdi)
movq %r14, -8(%rdi)
# Round 8
xorq %rsi, %r10
xorq -88(%rdi), %r11
xorq -80(%rdi), %r12
xorq -64(%rdi), %r14
xorq -56(%rdi), %r14
xorq -48(%rdi), %r10
xorq -32(%rdi), %r12
xorq -24(%rdi), %r13
xorq -16(%rdi), %r13
xorq (%rdi), %r10
xorq 8(%rdi), %r11
xorq 16(%rdi), %r12
xorq 32(%rdi), %r13
xorq 40(%rdi), %r14
xorq 48(%rdi), %r10
xorq 56(%rdi), %r11
xorq 64(%rdi), %r11
xorq 72(%rdi), %r12
xorq 80(%rdi), %r13
xorq 88(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 56(%rdi), %r11
movq -32(%rdi), %r12
movq 80(%rdi), %r13
movq -8(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -8(%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[8]
xorq $0x8a, %rsi
# Row 1
movq -24(%rdi), %r10
movq 88(%rdi), %r11
movq (%rdi), %r12
movq -88(%rdi), %r13
movq 24(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, (%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -24(%rdi)
# Row 2
movq 8(%rdi), %r10
movq -80(%rdi), %r11
movq 32(%rdi), %r12
movq -56(%rdi), %r13
movq 96(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 96(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 8(%rdi)
# Row 3
movq 40(%rdi), %r10
movq -48(%rdi), %r11
movq 64(%rdi), %r12
movq 16(%rdi), %r13
movq -72(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 40(%rdi)
# Row 4
xorq 72(%rdi), %rcx
xorq -16(%rdi), %r8
xorq -64(%rdi), %r9
xorq 48(%rdi), %rdx
xorq -40(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 72(%rdi)
movq %r11, -16(%rdi)
movq %r12, -64(%rdi)
movq %r13, 48(%rdi)
movq %r14, -40(%rdi)
# Round 9
xorq %rsi, %r10
xorq -88(%rdi), %r13
xorq -80(%rdi), %r11
xorq -72(%rdi), %r14
xorq -56(%rdi), %r13
xorq -48(%rdi), %r11
xorq -32(%rdi), %r12
xorq -24(%rdi), %r10
xorq -8(%rdi), %r14
xorq (%rdi), %r12
xorq 8(%rdi), %r10
xorq 16(%rdi), %r13
xorq 24(%rdi), %r14
xorq 32(%rdi), %r12
xorq 40(%rdi), %r10
xorq 56(%rdi), %r11
xorq 64(%rdi), %r12
xorq 80(%rdi), %r13
xorq 88(%rdi), %r11
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 88(%rdi), %r11
movq 32(%rdi), %r12
movq 16(%rdi), %r13
movq -40(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -40(%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[9]
xorq $0x88, %rsi
# Row 1
movq 80(%rdi), %r10
movq 24(%rdi), %r11
movq 8(%rdi), %r12
movq -48(%rdi), %r13
movq -64(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 80(%rdi)
# Row 2
movq 56(%rdi), %r10
movq (%rdi), %r11
movq -56(%rdi), %r12
movq -72(%rdi), %r13
movq 72(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, (%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 56(%rdi)
# Row 3
movq -8(%rdi), %r10
movq -24(%rdi), %r11
movq -80(%rdi), %r12
movq 64(%rdi), %r13
movq 48(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -8(%rdi)
# Row 4
xorq -32(%rdi), %rcx
xorq -88(%rdi), %r8
xorq 96(%rdi), %r9
xorq 40(%rdi), %rdx
xorq -16(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -32(%rdi)
movq %r11, -88(%rdi)
movq %r12, 96(%rdi)
movq %r13, 40(%rdi)
movq %r14, -16(%rdi)
# Round 10
xorq %rsi, %r10
xorq -80(%rdi), %r12
xorq -72(%rdi), %r13
xorq -64(%rdi), %r14
xorq -56(%rdi), %r12
xorq -48(%rdi), %r13
xorq -40(%rdi), %r14
xorq -24(%rdi), %r11
xorq -8(%rdi), %r10
xorq (%rdi), %r11
xorq 8(%rdi), %r12
xorq 16(%rdi), %r13
xorq 24(%rdi), %r11
xorq 32(%rdi), %r12
xorq 48(%rdi), %r14
xorq 56(%rdi), %r10
xorq 64(%rdi), %r13
xorq 72(%rdi), %r14
xorq 80(%rdi), %r10
xorq 88(%rdi), %r11
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 24(%rdi), %r11
movq -56(%rdi), %r12
movq 64(%rdi), %r13
movq -16(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -16(%rdi)
movq $0x80008009, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[10]
xorq %r14, %rsi
# Row 1
movq 16(%rdi), %r10
movq -64(%rdi), %r11
movq 56(%rdi), %r12
movq -24(%rdi), %r13
movq 96(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 96(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 16(%rdi)
# Row 2
movq 88(%rdi), %r10
movq 8(%rdi), %r11
movq -72(%rdi), %r12
movq 48(%rdi), %r13
movq -32(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 88(%rdi)
# Row 3
movq -40(%rdi), %r10
movq 80(%rdi), %r11
movq (%rdi), %r12
movq -80(%rdi), %r13
movq 40(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, (%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -40(%rdi)
# Row 4
xorq 32(%rdi), %rcx
xorq -48(%rdi), %r8
xorq 72(%rdi), %r9
xorq -8(%rdi), %rdx
xorq -88(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 32(%rdi)
movq %r11, -48(%rdi)
movq %r12, 72(%rdi)
movq %r13, -8(%rdi)
movq %r14, -88(%rdi)
# Round 11
xorq %rsi, %r10
xorq -80(%rdi), %r13
xorq -72(%rdi), %r12
xorq -64(%rdi), %r11
xorq -56(%rdi), %r12
xorq -40(%rdi), %r10
xorq -32(%rdi), %r14
xorq -24(%rdi), %r13
xorq -16(%rdi), %r14
xorq (%rdi), %r12
xorq 8(%rdi), %r11
xorq 16(%rdi), %r10
xorq 24(%rdi), %r11
xorq 40(%rdi), %r14
xorq 48(%rdi), %r13
xorq 56(%rdi), %r12
xorq 64(%rdi), %r13
xorq 80(%rdi), %r11
xorq 88(%rdi), %r10
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -64(%rdi), %r11
movq -72(%rdi), %r12
movq -80(%rdi), %r13
movq -88(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -88(%rdi)
movq $0x8000000a, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[11]
xorq %r14, %rsi
# Row 1
movq 64(%rdi), %r10
movq 96(%rdi), %r11
movq 88(%rdi), %r12
movq 80(%rdi), %r13
movq 72(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 96(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 64(%rdi)
# Row 2
movq 24(%rdi), %r10
movq 56(%rdi), %r11
movq 48(%rdi), %r12
movq 40(%rdi), %r13
movq 32(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 24(%rdi)
# Row 3
movq -16(%rdi), %r10
movq 16(%rdi), %r11
movq 8(%rdi), %r12
movq (%rdi), %r13
movq -8(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, (%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -16(%rdi)
# Row 4
xorq -56(%rdi), %rcx
xorq -24(%rdi), %r8
xorq -32(%rdi), %r9
xorq -40(%rdi), %rdx
xorq -48(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -56(%rdi)
movq %r11, -24(%rdi)
movq %r12, -32(%rdi)
movq %r13, -40(%rdi)
movq %r14, -48(%rdi)
# Round 12
xorq %rsi, %r10
xorq -88(%rdi), %r14
xorq -80(%rdi), %r13
xorq -72(%rdi), %r12
xorq -64(%rdi), %r11
xorq -16(%rdi), %r10
xorq -8(%rdi), %r14
xorq (%rdi), %r13
xorq 8(%rdi), %r12
xorq 16(%rdi), %r11
xorq 24(%rdi), %r10
xorq 32(%rdi), %r14
xorq 40(%rdi), %r13
xorq 48(%rdi), %r12
xorq 56(%rdi), %r11
xorq 64(%rdi), %r10
xorq 72(%rdi), %r14
xorq 80(%rdi), %r13
xorq 88(%rdi), %r12
xorq 96(%rdi), %r11
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 96(%rdi), %r11
movq 48(%rdi), %r12
movq (%rdi), %r13
movq -48(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 96(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, (%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -48(%rdi)
movq $0x8000808b, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[12]
xorq %r14, %rsi
# Row 1
movq -80(%rdi), %r10
movq 72(%rdi), %r11
movq 24(%rdi), %r12
movq 16(%rdi), %r13
movq -32(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -80(%rdi)
# Row 2
movq -64(%rdi), %r10
movq 88(%rdi), %r11
movq 40(%rdi), %r12
movq -8(%rdi), %r13
movq -56(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -64(%rdi)
# Row 3
movq -88(%rdi), %r10
movq 64(%rdi), %r11
movq 56(%rdi), %r12
movq 8(%rdi), %r13
movq -40(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -88(%rdi)
# Row 4
xorq -72(%rdi), %rcx
xorq 80(%rdi), %r8
xorq 32(%rdi), %r9
xorq -16(%rdi), %rdx
xorq -24(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -72(%rdi)
movq %r11, 80(%rdi)
movq %r12, 32(%rdi)
movq %r13, -16(%rdi)
movq %r14, -24(%rdi)
# Round 13
xorq %rsi, %r10
xorq -88(%rdi), %r10
xorq -80(%rdi), %r10
xorq -64(%rdi), %r10
xorq -56(%rdi), %r14
xorq -48(%rdi), %r14
xorq -40(%rdi), %r14
xorq -32(%rdi), %r14
xorq -8(%rdi), %r13
xorq (%rdi), %r13
xorq 8(%rdi), %r13
xorq 16(%rdi), %r13
xorq 24(%rdi), %r12
xorq 40(%rdi), %r12
xorq 48(%rdi), %r12
xorq 56(%rdi), %r12
xorq 64(%rdi), %r11
xorq 72(%rdi), %r11
xorq 88(%rdi), %r11
xorq 96(%rdi), %r11
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 72(%rdi), %r11
movq 40(%rdi), %r12
movq 8(%rdi), %r13
movq -24(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -24(%rdi)
movq $0x800000000000008b, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[13]
xorq %r14, %rsi
# Row 1
movq (%rdi), %r10
movq -32(%rdi), %r11
movq -64(%rdi), %r12
movq 64(%rdi), %r13
movq 32(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 32(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, (%rdi)
# Row 2
movq 96(%rdi), %r10
movq 24(%rdi), %r11
movq -8(%rdi), %r12
movq -40(%rdi), %r13
movq -72(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -40(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 96(%rdi)
# Row 3
movq -48(%rdi), %r10
movq -80(%rdi), %r11
movq 88(%rdi), %r12
movq 56(%rdi), %r13
movq -16(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -80(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -48(%rdi)
# Row 4
xorq 48(%rdi), %rcx
xorq 16(%rdi), %r8
xorq -56(%rdi), %r9
xorq -88(%rdi), %rdx
xorq 80(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 48(%rdi)
movq %r11, 16(%rdi)
movq %r12, -56(%rdi)
movq %r13, -88(%rdi)
movq %r14, 80(%rdi)
# Round 14
xorq %rsi, %r10
xorq -80(%rdi), %r11
xorq -72(%rdi), %r14
xorq -64(%rdi), %r12
xorq -48(%rdi), %r10
xorq -40(%rdi), %r13
xorq -32(%rdi), %r11
xorq -24(%rdi), %r14
xorq -16(%rdi), %r14
xorq -8(%rdi), %r12
xorq (%rdi), %r10
xorq 8(%rdi), %r13
xorq 24(%rdi), %r11
xorq 32(%rdi), %r14
xorq 40(%rdi), %r12
xorq 56(%rdi), %r13
xorq 64(%rdi), %r13
xorq 72(%rdi), %r11
xorq 88(%rdi), %r12
xorq 96(%rdi), %r10
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -32(%rdi), %r11
movq -8(%rdi), %r12
movq 56(%rdi), %r13
movq 80(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 80(%rdi)
movq $0x8000000000008089, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[14]
xorq %r14, %rsi
# Row 1
movq 8(%rdi), %r10
movq 32(%rdi), %r11
movq 96(%rdi), %r12
movq -80(%rdi), %r13
movq -56(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 96(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 8(%rdi)
# Row 2
movq 72(%rdi), %r10
movq -64(%rdi), %r11
movq -40(%rdi), %r12
movq -16(%rdi), %r13
movq 48(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 72(%rdi)
# Row 3
movq -24(%rdi), %r10
movq (%rdi), %r11
movq 24(%rdi), %r12
movq 88(%rdi), %r13
movq -88(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, (%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -24(%rdi)
# Row 4
xorq 40(%rdi), %rcx
xorq 64(%rdi), %r8
xorq -72(%rdi), %r9
xorq -48(%rdi), %rdx
xorq 16(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 40(%rdi)
movq %r11, 64(%rdi)
movq %r12, -72(%rdi)
movq %r13, -48(%rdi)
movq %r14, 16(%rdi)
# Round 15
xorq %rsi, %r10
xorq -88(%rdi), %r14
xorq -80(%rdi), %r13
xorq -64(%rdi), %r11
xorq -56(%rdi), %r14
xorq -40(%rdi), %r12
xorq -32(%rdi), %r11
xorq -24(%rdi), %r10
xorq -16(%rdi), %r13
xorq -8(%rdi), %r12
xorq (%rdi), %r11
xorq 8(%rdi), %r10
xorq 24(%rdi), %r12
xorq 32(%rdi), %r11
xorq 48(%rdi), %r14
xorq 56(%rdi), %r13
xorq 72(%rdi), %r10
xorq 80(%rdi), %r14
xorq 88(%rdi), %r13
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 32(%rdi), %r11
movq -40(%rdi), %r12
movq 88(%rdi), %r13
movq 16(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 16(%rdi)
movq $0x8000000000008003, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[15]
xorq %r14, %rsi
# Row 1
movq 56(%rdi), %r10
movq -56(%rdi), %r11
movq 72(%rdi), %r12
movq (%rdi), %r13
movq -72(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, (%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -72(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 56(%rdi)
# Row 2
movq -32(%rdi), %r10
movq 96(%rdi), %r11
movq -16(%rdi), %r12
movq -88(%rdi), %r13
movq 40(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 96(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -32(%rdi)
# Row 3
movq 80(%rdi), %r10
movq 8(%rdi), %r11
movq -64(%rdi), %r12
movq 24(%rdi), %r13
movq -48(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 80(%rdi)
# Row 4
xorq -8(%rdi), %rcx
xorq -80(%rdi), %r8
xorq 48(%rdi), %r9
xorq -24(%rdi), %rdx
xorq 64(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -8(%rdi)
movq %r11, -80(%rdi)
movq %r12, 48(%rdi)
movq %r13, -24(%rdi)
movq %r14, 64(%rdi)
# Round 16
xorq %rsi, %r10
xorq -88(%rdi), %r13
xorq -72(%rdi), %r14
xorq -64(%rdi), %r12
xorq -56(%rdi), %r11
xorq -48(%rdi), %r14
xorq -40(%rdi), %r12
xorq -32(%rdi), %r10
xorq -16(%rdi), %r12
xorq (%rdi), %r13
xorq 8(%rdi), %r11
xorq 16(%rdi), %r14
xorq 24(%rdi), %r13
xorq 32(%rdi), %r11
xorq 40(%rdi), %r14
xorq 56(%rdi), %r10
xorq 72(%rdi), %r12
xorq 80(%rdi), %r10
xorq 88(%rdi), %r13
xorq 96(%rdi), %r11
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -56(%rdi), %r11
movq -16(%rdi), %r12
movq 24(%rdi), %r13
movq 64(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 64(%rdi)
movq $0x8000000000008002, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[16]
xorq %r14, %rsi
# Row 1
movq 88(%rdi), %r10
movq -72(%rdi), %r11
movq -32(%rdi), %r12
movq 8(%rdi), %r13
movq 48(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 88(%rdi)
# Row 2
movq 32(%rdi), %r10
movq 72(%rdi), %r11
movq -88(%rdi), %r12
movq -48(%rdi), %r13
movq -8(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 32(%rdi)
# Row 3
movq 16(%rdi), %r10
movq 56(%rdi), %r11
movq 96(%rdi), %r12
movq -64(%rdi), %r13
movq -24(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 96(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 16(%rdi)
# Row 4
xorq -40(%rdi), %rcx
xorq (%rdi), %r8
xorq 40(%rdi), %r9
xorq 80(%rdi), %rdx
xorq -80(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -40(%rdi)
movq %r11, (%rdi)
movq %r12, 40(%rdi)
movq %r13, 80(%rdi)
movq %r14, -80(%rdi)
# Round 17
xorq %rsi, %r10
xorq -88(%rdi), %r12
xorq -72(%rdi), %r11
xorq -64(%rdi), %r13
xorq -56(%rdi), %r11
xorq -48(%rdi), %r13
xorq -32(%rdi), %r12
xorq -24(%rdi), %r14
xorq -16(%rdi), %r12
xorq -8(%rdi), %r14
xorq 8(%rdi), %r13
xorq 16(%rdi), %r10
xorq 24(%rdi), %r13
xorq 32(%rdi), %r10
xorq 48(%rdi), %r14
xorq 56(%rdi), %r11
xorq 64(%rdi), %r14
xorq 72(%rdi), %r11
xorq 88(%rdi), %r10
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -72(%rdi), %r11
movq -88(%rdi), %r12
movq -64(%rdi), %r13
movq -80(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -88(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -80(%rdi)
movq $0x8000000000000080, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[17]
xorq %r14, %rsi
# Row 1
movq 24(%rdi), %r10
movq 48(%rdi), %r11
movq 32(%rdi), %r12
movq 56(%rdi), %r13
movq 40(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 24(%rdi)
# Row 2
movq -56(%rdi), %r10
movq -32(%rdi), %r11
movq -48(%rdi), %r12
movq -24(%rdi), %r13
movq -40(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -56(%rdi)
# Row 3
movq 64(%rdi), %r10
movq 88(%rdi), %r11
movq 72(%rdi), %r12
movq 96(%rdi), %r13
movq 80(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 96(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 64(%rdi)
# Row 4
xorq -16(%rdi), %rcx
xorq 8(%rdi), %r8
xorq -8(%rdi), %r9
xorq 16(%rdi), %rdx
xorq (%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -16(%rdi)
movq %r11, 8(%rdi)
movq %r12, -8(%rdi)
movq %r13, 16(%rdi)
movq %r14, (%rdi)
# Round 18
xorq %rsi, %r10
xorq -88(%rdi), %r12
xorq -80(%rdi), %r14
xorq -72(%rdi), %r11
xorq -64(%rdi), %r13
xorq -56(%rdi), %r10
xorq -48(%rdi), %r12
xorq -40(%rdi), %r14
xorq -32(%rdi), %r11
xorq -24(%rdi), %r13
xorq 24(%rdi), %r10
xorq 32(%rdi), %r12
xorq 40(%rdi), %r14
xorq 48(%rdi), %r11
xorq 56(%rdi), %r13
xorq 64(%rdi), %r10
xorq 72(%rdi), %r12
xorq 80(%rdi), %r14
xorq 88(%rdi), %r11
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 48(%rdi), %r11
movq -48(%rdi), %r12
movq 96(%rdi), %r13
movq (%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 96(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, (%rdi)
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[18]
xorq $0x800a, %rsi
# Row 1
movq -64(%rdi), %r10
movq 40(%rdi), %r11
movq -56(%rdi), %r12
movq 88(%rdi), %r13
movq -8(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 88(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -64(%rdi)
# Row 2
movq -72(%rdi), %r10
movq 32(%rdi), %r11
movq -24(%rdi), %r12
movq 80(%rdi), %r13
movq -16(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -72(%rdi)
# Row 3
movq -80(%rdi), %r10
movq 24(%rdi), %r11
movq -32(%rdi), %r12
movq 72(%rdi), %r13
movq 16(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 24(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -80(%rdi)
# Row 4
xorq -88(%rdi), %rcx
xorq 56(%rdi), %r8
xorq -40(%rdi), %r9
xorq 64(%rdi), %rdx
xorq 8(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -88(%rdi)
movq %r11, 56(%rdi)
movq %r12, -40(%rdi)
movq %r13, 64(%rdi)
movq %r14, 8(%rdi)
# Round 19
xorq %rsi, %r10
xorq -80(%rdi), %r10
xorq -72(%rdi), %r10
xorq -64(%rdi), %r10
xorq -56(%rdi), %r12
xorq -48(%rdi), %r12
xorq -32(%rdi), %r12
xorq -24(%rdi), %r12
xorq -16(%rdi), %r14
xorq -8(%rdi), %r14
xorq (%rdi), %r14
xorq 16(%rdi), %r14
xorq 24(%rdi), %r11
xorq 32(%rdi), %r11
xorq 40(%rdi), %r11
xorq 48(%rdi), %r11
xorq 72(%rdi), %r13
xorq 80(%rdi), %r13
xorq 88(%rdi), %r13
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq 40(%rdi), %r11
movq -24(%rdi), %r12
movq 72(%rdi), %r13
movq 8(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -24(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 8(%rdi)
movq $0x800000008000000a, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[19]
xorq %r14, %rsi
# Row 1
movq 96(%rdi), %r10
movq -8(%rdi), %r11
movq -72(%rdi), %r12
movq 24(%rdi), %r13
movq -40(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 24(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -40(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 96(%rdi)
# Row 2
movq 48(%rdi), %r10
movq -56(%rdi), %r11
movq 80(%rdi), %r12
movq 16(%rdi), %r13
movq -88(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -56(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 16(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 48(%rdi)
# Row 3
movq (%rdi), %r10
movq -64(%rdi), %r11
movq 32(%rdi), %r12
movq -32(%rdi), %r13
movq 64(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -64(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 32(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 64(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, (%rdi)
# Row 4
xorq -48(%rdi), %rcx
xorq 88(%rdi), %r8
xorq -16(%rdi), %r9
xorq -80(%rdi), %rdx
xorq 56(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -48(%rdi)
movq %r11, 88(%rdi)
movq %r12, -16(%rdi)
movq %r13, -80(%rdi)
movq %r14, 56(%rdi)
# Round 20
xorq %rsi, %r10
xorq -88(%rdi), %r14
xorq -72(%rdi), %r12
xorq -64(%rdi), %r11
xorq -56(%rdi), %r11
xorq -40(%rdi), %r14
xorq -32(%rdi), %r13
xorq -24(%rdi), %r12
xorq -8(%rdi), %r11
xorq (%rdi), %r10
xorq 8(%rdi), %r14
xorq 16(%rdi), %r13
xorq 24(%rdi), %r13
xorq 32(%rdi), %r12
xorq 40(%rdi), %r11
xorq 48(%rdi), %r10
xorq 64(%rdi), %r14
xorq 72(%rdi), %r13
xorq 80(%rdi), %r12
xorq 96(%rdi), %r10
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -8(%rdi), %r11
movq 80(%rdi), %r12
movq -32(%rdi), %r13
movq 56(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 56(%rdi)
movq $0x8000000080008081, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[20]
xorq %r14, %rsi
# Row 1
movq 72(%rdi), %r10
movq -40(%rdi), %r11
movq 48(%rdi), %r12
movq -64(%rdi), %r13
movq -16(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 72(%rdi)
# Row 2
movq 40(%rdi), %r10
movq -72(%rdi), %r11
movq 16(%rdi), %r12
movq 64(%rdi), %r13
movq -48(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 64(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 40(%rdi)
# Row 3
movq 8(%rdi), %r10
movq 96(%rdi), %r11
movq -56(%rdi), %r12
movq 32(%rdi), %r13
movq -80(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 96(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -56(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 8(%rdi)
# Row 4
xorq -24(%rdi), %rcx
xorq 24(%rdi), %r8
xorq -88(%rdi), %r9
xorq (%rdi), %rdx
xorq 88(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, -24(%rdi)
movq %r11, 24(%rdi)
movq %r12, -88(%rdi)
movq %r13, (%rdi)
movq %r14, 88(%rdi)
# Round 21
xorq %rsi, %r10
xorq -80(%rdi), %r14
xorq -72(%rdi), %r11
xorq -64(%rdi), %r13
xorq -56(%rdi), %r12
xorq -48(%rdi), %r14
xorq -40(%rdi), %r11
xorq -32(%rdi), %r13
xorq -16(%rdi), %r14
xorq -8(%rdi), %r11
xorq 8(%rdi), %r10
xorq 16(%rdi), %r12
xorq 32(%rdi), %r13
xorq 40(%rdi), %r10
xorq 48(%rdi), %r12
xorq 56(%rdi), %r14
xorq 64(%rdi), %r13
xorq 72(%rdi), %r10
xorq 80(%rdi), %r12
xorq 96(%rdi), %r11
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -40(%rdi), %r11
movq 16(%rdi), %r12
movq 32(%rdi), %r13
movq 88(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 16(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 88(%rdi)
movq $0x8000000000008080, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant RC[21]
xorq %r14, %rsi
# Row 1
movq -32(%rdi), %r10
movq -16(%rdi), %r11
movq 40(%rdi), %r12
movq 96(%rdi), %r13
movq -88(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 96(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -88(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -32(%rdi)
# Row 2
movq -8(%rdi), %r10
movq 48(%rdi), %r11
movq 64(%rdi), %r12
movq -80(%rdi), %r13
movq -24(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -80(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -8(%rdi)
# Row 3
movq 56(%rdi), %r10
movq 72(%rdi), %r11
movq -72(%rdi), %r12
movq -56(%rdi), %r13
movq (%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 72(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -72(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, (%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 56(%rdi)
# Row 4
xorq 80(%rdi), %rcx
xorq -64(%rdi), %r8
xorq -48(%rdi), %r9
xorq 8(%rdi), %rdx
xorq 24(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 80(%rdi)
movq %r11, -64(%rdi)
movq %r12, -48(%rdi)
movq %r13, 8(%rdi)
movq %r14, 24(%rdi)
# Round 22
xorq %rsi, %r10
xorq -88(%rdi), %r14
xorq -80(%rdi), %r13
xorq -72(%rdi), %r12
xorq -56(%rdi), %r13
xorq -40(%rdi), %r11
xorq -32(%rdi), %r10
xorq -24(%rdi), %r14
xorq -16(%rdi), %r11
xorq -8(%rdi), %r10
xorq (%rdi), %r14
xorq 16(%rdi), %r12
xorq 32(%rdi), %r13
xorq 40(%rdi), %r12
xorq 48(%rdi), %r11
xorq 56(%rdi), %r10
xorq 64(%rdi), %r12
xorq 72(%rdi), %r11
xorq 88(%rdi), %r14
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -16(%rdi), %r11
movq 64(%rdi), %r12
movq -56(%rdi), %r13
movq 24(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -16(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 64(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -56(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 24(%rdi)
movq $0x80000001, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq %r14, %rsi
# Row 1
movq 32(%rdi), %r10
movq -88(%rdi), %r11
movq -8(%rdi), %r12
movq 72(%rdi), %r13
movq -48(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -8(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -48(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 32(%rdi)
# Row 2
movq -40(%rdi), %r10
movq 40(%rdi), %r11
movq -80(%rdi), %r12
movq (%rdi), %r13
movq 80(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 40(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, (%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 80(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -40(%rdi)
# Row 3
movq 88(%rdi), %r10
movq -32(%rdi), %r11
movq 48(%rdi), %r12
movq -72(%rdi), %r13
movq 8(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 48(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 8(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 88(%rdi)
# Row 4
xorq 16(%rdi), %rcx
xorq 96(%rdi), %r8
xorq -24(%rdi), %r9
xorq 56(%rdi), %rdx
xorq -64(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 16(%rdi)
movq %r11, 96(%rdi)
movq %r12, -24(%rdi)
movq %r13, 56(%rdi)
movq %r14, -64(%rdi)
# Round 23
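# Final round: the rotating index map has period 24, so by Round 23
# the row reads and the closing stores fall on consecutive natural
# offsets, leaving the state in standard lane order at no extra cost.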
xorq %rsi, %r10
xorq -88(%rdi), %r11
xorq -80(%rdi), %r12
xorq -72(%rdi), %r13
xorq -56(%rdi), %r13
xorq -48(%rdi), %r14
xorq -40(%rdi), %r10
xorq -32(%rdi), %r11
xorq -16(%rdi), %r11
xorq -8(%rdi), %r12
xorq (%rdi), %r13
xorq 8(%rdi), %r14
xorq 24(%rdi), %r14
xorq 32(%rdi), %r10
xorq 40(%rdi), %r11
xorq 48(%rdi), %r12
xorq 64(%rdi), %r12
xorq 72(%rdi), %r13
xorq 80(%rdi), %r14
xorq 88(%rdi), %r10
# Calc t[0..4]
rorxq $63, %r11, %rdx
rorxq $63, %r12, %rax
rorxq $63, %r13, %rcx
rorxq $63, %r14, %r8
rorxq $63, %r10, %r9
xorq %r14, %rdx
xorq %r10, %rax
xorq %r11, %rcx
xorq %r12, %r8
xorq %r13, %r9
# Row Mix
# Row 0
movq %rsi, %r10
movq -88(%rdi), %r11
movq -80(%rdi), %r12
movq -72(%rdi), %r13
movq -64(%rdi), %r14
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
rolq $44, %r11
rolq $43, %r12
rolq $21, %r13
rolq $14, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -88(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -80(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -72(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -64(%rdi)
movq $0x8000000080008008, %r14
andnq %r12, %r11, %rsi
xorq %r10, %rsi
# XOR in constant
xorq %r14, %rsi
# Row 1
movq -56(%rdi), %r10
movq -48(%rdi), %r11
movq -40(%rdi), %r12
movq -32(%rdi), %r13
movq -24(%rdi), %r14
xorq %r8, %r10
xorq %r9, %r11
xorq %rdx, %r12
xorq %rax, %r13
xorq %rcx, %r14
rolq $28, %r10
rolq $20, %r11
rolq $3, %r12
rolq $45, %r13
rolq $61, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -48(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, -40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, -32(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, -24(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -56(%rdi)
# Row 2
movq -16(%rdi), %r10
movq -8(%rdi), %r11
movq (%rdi), %r12
movq 8(%rdi), %r13
movq 16(%rdi), %r14
xorq %rax, %r10
xorq %rcx, %r11
xorq %r8, %r12
xorq %r9, %r13
xorq %rdx, %r14
rolq $0x01, %r10
rolq $6, %r11
rolq $25, %r12
rolq $8, %r13
rolq $18, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, -8(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, (%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 8(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 16(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, -16(%rdi)
# Row 3
movq 24(%rdi), %r10
movq 32(%rdi), %r11
movq 40(%rdi), %r12
movq 48(%rdi), %r13
movq 56(%rdi), %r14
xorq %r9, %r10
xorq %rdx, %r11
xorq %rax, %r12
xorq %rcx, %r13
xorq %r8, %r14
rolq $27, %r10
rolq $36, %r11
rolq $10, %r12
rolq $15, %r13
rolq $56, %r14
andnq %r13, %r12, %r15
xorq %r11, %r15
movq %r15, 32(%rdi)
andnq %r14, %r13, %r15
xorq %r12, %r15
movq %r15, 40(%rdi)
andnq %r10, %r14, %r15
xorq %r13, %r15
movq %r15, 48(%rdi)
andnq %r11, %r10, %r15
xorq %r14, %r15
movq %r15, 56(%rdi)
andnq %r12, %r11, %r15
xorq %r10, %r15
movq %r15, 24(%rdi)
# Row 4
xorq 64(%rdi), %rcx
xorq 72(%rdi), %r8
xorq 80(%rdi), %r9
xorq 88(%rdi), %rdx
xorq 96(%rdi), %rax
rorxq $2, %rcx, %r10
rorxq $9, %r8, %r11
rorxq $25, %r9, %r12
rorxq $23, %rdx, %r13
rorxq $62, %rax, %r14
andnq %r12, %r11, %rdx
andnq %r13, %r12, %rax
andnq %r14, %r13, %rcx
andnq %r10, %r14, %r8
andnq %r11, %r10, %r9
xorq %rdx, %r10
xorq %rax, %r11
xorq %rcx, %r12
xorq %r8, %r13
xorq %r9, %r14
movq %r10, 64(%rdi)
movq %r11, 72(%rdi)
movq %r12, 80(%rdi)
movq %r13, 88(%rdi)
movq %r14, 96(%rdi)
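# All 24 rounds done: write the cached lane s[0] back to the state,
# then restore the callee-saved registers.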
movq %rsi, -96(%rdi)
popq %r15
popq %r14
popq %r13
popq %r12
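# repz retq: the two-byte return idiom that sidesteps the
# branch-predictor penalty of a lone ret on older AMD cores.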
repz retq
#ifndef __APPLE__
.size sha3_block_bmi2,.-sha3_block_bmi2
#endif /* __APPLE__ */
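# sha3_block_n_bmi2 appears to absorb and permute a run of blocks:
# %rdi holds the Keccak state, %rsi the message, %rcx the rate
# (block size in bytes), and %rdx, which the round code never
# touches, presumably carries the block count. The rate is kept in
# %rbp and also pushed, since %rbp doubles as scratch in the wider
# load paths.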
#ifndef __APPLE__
.text
.globl sha3_block_n_bmi2
.type sha3_block_n_bmi2,@function
.align 16
sha3_block_n_bmi2:
#else
.section __TEXT,__text
.globl _sha3_block_n_bmi2
.p2align 4
_sha3_block_n_bmi2:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
pushq %rcx
movq %rcx, %rbp
movq (%rdi), %rcx
addq $0x60, %rdi
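# Bias the state pointer by 0x60 so all 25 lanes of the 200-byte
# state sit at offsets in [-0x60, 0x60] and every access encodes
# with a one-byte displacement; lane s[0], loaded above, lives in
# %rcx rather than memory during the rounds.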
L_sha3_block_n_bmi2_start:
cmpq $0x88, %rbp
je L_sha3_block_n_bmi2_load_256
cmpq $0xa8, %rbp
je L_sha3_block_n_bmi2_load_128
cmpq $0x90, %rbp
je L_sha3_block_n_bmi2_load_224
cmpq $0x68, %rbp
je L_sha3_block_n_bmi2_load_384
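# Rate dispatch: 0xa8 = 168 bytes (SHAKE128), 0x90 = 144 (SHA3-224),
# 0x88 = 136 (SHA3-256/SHAKE256), 0x68 = 104 (SHA3-384); the
# fall-through absorbs the 72-byte, 9-lane SHA3-512 rate. Each path
# XORs the message block into the first rate/8 lanes (the sponge
# absorb step) before joining the shared round code.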
movq (%rsi), %r12
movq 8(%rsi), %r13
movq 16(%rsi), %r14
movq 24(%rsi), %r15
movq 32(%rsi), %rbx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %r10
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq -56(%rdi), %rax
xorq -48(%rdi), %r8
xorq -40(%rdi), %r9
xorq -32(%rdi), %r10
movq %r12, %rcx
movq %r13, -88(%rdi)
movq %r14, -80(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -64(%rdi)
movq %rax, -56(%rdi)
movq %r8, -48(%rdi)
movq %r9, -40(%rdi)
movq %r10, -32(%rdi)
jmp L_sha3_block_n_bmi2_rounds
L_sha3_block_n_bmi2_load_128:
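# 168 bytes = 21 lanes, absorbed in batches of 5, 6, 6 and 4 to fit
# the free registers; %rbp (the rate) is clobbered as scratch here,
# which is presumably why the rate was also pushed on entry.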
movq (%rsi), %r12
movq 8(%rsi), %r13
movq 16(%rsi), %r14
movq 24(%rsi), %r15
movq 32(%rsi), %rbx
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
movq %r12, %rcx
movq %r13, -88(%rdi)
movq %r14, -80(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -64(%rdi)
movq 40(%rsi), %rax
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %r10
movq 72(%rsi), %r11
movq 80(%rsi), %rbp
xorq -56(%rdi), %rax
xorq -48(%rdi), %r8
xorq -40(%rdi), %r9
xorq -32(%rdi), %r10
xorq -24(%rdi), %r11
xorq -16(%rdi), %rbp
movq %rax, -56(%rdi)
movq %r8, -48(%rdi)
movq %r9, -40(%rdi)
movq %r10, -32(%rdi)
movq %r11, -24(%rdi)
movq %rbp, -16(%rdi)
movq 88(%rsi), %rax
movq 96(%rsi), %r8
movq 104(%rsi), %r9
movq 112(%rsi), %r10
movq 120(%rsi), %r11
movq 128(%rsi), %rbp
xorq -8(%rdi), %rax
xorq (%rdi), %r8
xorq 8(%rdi), %r9
xorq 16(%rdi), %r10
xorq 24(%rdi), %r11
xorq 32(%rdi), %rbp
movq %rax, -8(%rdi)
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %rbp, 32(%rdi)
movq 136(%rsi), %rax
movq 144(%rsi), %r8
movq 152(%rsi), %r9
movq 160(%rsi), %r10
xorq 40(%rdi), %rax
xorq 48(%rdi), %r8
xorq 56(%rdi), %r9
xorq 64(%rdi), %r10
movq %rax, 40(%rdi)
movq %r8, 48(%rdi)
movq %r9, 56(%rdi)
movq %r10, 64(%rdi)
jmp L_sha3_block_n_bmi2_rounds
L_sha3_block_n_bmi2_load_224:
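# 144 bytes = 18 lanes, absorbed as lanes 5..14 first and then lanes
# 0..4 and 15..17, reusing the same scratch registers per batch.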
movq 40(%rsi), %r12
movq 48(%rsi), %r13
movq 56(%rsi), %r14
movq 64(%rsi), %r15
movq 72(%rsi), %rbx
movq 80(%rsi), %rax
movq 88(%rsi), %r8
movq 96(%rsi), %r9
movq 104(%rsi), %r10
movq 112(%rsi), %r11
xorq -56(%rdi), %r12
xorq -48(%rdi), %r13
xorq -40(%rdi), %r14
xorq -32(%rdi), %r15
xorq -24(%rdi), %rbx
xorq -16(%rdi), %rax
xorq -8(%rdi), %r8
xorq (%rdi), %r9
xorq 8(%rdi), %r10
xorq 16(%rdi), %r11
movq %r12, -56(%rdi)
movq %r13, -48(%rdi)
movq %r14, -40(%rdi)
movq %r15, -32(%rdi)
movq %rbx, -24(%rdi)
movq %rax, -16(%rdi)
movq %r8, -8(%rdi)
movq %r9, (%rdi)
movq %r10, 8(%rdi)
movq %r11, 16(%rdi)
movq (%rsi), %r12
movq 8(%rsi), %r13
movq 16(%rsi), %r14
movq 24(%rsi), %r15
movq 32(%rsi), %rbx
movq 120(%rsi), %rax
movq 128(%rsi), %r8
movq 136(%rsi), %r9
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq 24(%rdi), %rax
xorq 32(%rdi), %r8
xorq 40(%rdi), %r9
movq %r12, %rcx
movq %r13, -88(%rdi)
movq %r14, -80(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -64(%rdi)
movq %rax, 24(%rdi)
movq %r8, 32(%rdi)
movq %r9, 40(%rdi)
jmp L_sha3_block_n_bmi2_rounds
L_sha3_block_n_bmi2_load_384:
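# 104 bytes = 13 lanes, absorbed as a batch of 9 followed by a
# batch of 4.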
movq (%rsi), %r12
movq 8(%rsi), %r13
movq 16(%rsi), %r14
movq 24(%rsi), %r15
movq 32(%rsi), %rbx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %r10
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq -56(%rdi), %rax
xorq -48(%rdi), %r8
xorq -40(%rdi), %r9
xorq -32(%rdi), %r10
movq %r12, %rcx
movq %r13, -88(%rdi)
movq %r14, -80(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -64(%rdi)
movq %rax, -56(%rdi)
movq %r8, -48(%rdi)
movq %r9, -40(%rdi)
movq %r10, -32(%rdi)
movq 72(%rsi), %rax
movq 80(%rsi), %r8
movq 88(%rsi), %r9
movq 96(%rsi), %r10
xorq -24(%rdi), %rax
xorq -16(%rdi), %r8
xorq -8(%rdi), %r9
xorq (%rdi), %r10
movq %rax, -24(%rdi)
movq %r8, -16(%rdi)
movq %r9, -8(%rdi)
movq %r10, (%rdi)
jmp L_sha3_block_n_bmi2_rounds
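# 136 bytes = 17 lanes (the SHA3-256 and SHAKE256 rate), absorbed as
# a batch of 11 followed by a batch of 6; this path falls straight
# through into the rounds.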
L_sha3_block_n_bmi2_load_256:
movq (%rsi), %r12
movq 8(%rsi), %r13
movq 16(%rsi), %r14
movq 24(%rsi), %r15
movq 32(%rsi), %rbx
movq 40(%rsi), %rax
movq 48(%rsi), %r8
movq 56(%rsi), %r9
movq 64(%rsi), %r10
movq 72(%rsi), %r11
movq 80(%rsi), %rbp
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq -56(%rdi), %rax
xorq -48(%rdi), %r8
xorq -40(%rdi), %r9
xorq -32(%rdi), %r10
xorq -24(%rdi), %r11
xorq -16(%rdi), %rbp
movq %r12, %rcx
movq %r13, -88(%rdi)
movq %r14, -80(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -64(%rdi)
movq %rax, -56(%rdi)
movq %r8, -48(%rdi)
movq %r9, -40(%rdi)
movq %r10, -32(%rdi)
movq %r11, -24(%rdi)
movq %rbp, -16(%rdi)
movq 88(%rsi), %rax
movq 96(%rsi), %r8
movq 104(%rsi), %r9
movq 112(%rsi), %r10
movq 120(%rsi), %r11
movq 128(%rsi), %rbp
xorq -8(%rdi), %rax
xorq (%rdi), %r8
xorq 8(%rdi), %r9
xorq 16(%rdi), %r10
xorq 24(%rdi), %r11
xorq 32(%rdi), %rbp
movq %rax, -8(%rdi)
movq %r8, (%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
movq %rbp, 32(%rdi)
L_sha3_block_n_bmi2_rounds:
# Round 0
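# The 24 rounds of Keccak-f[1600] are fully unrolled. Register roles
# differ from sha3_block_bmi2 above: lanes in %r12..%r15/%rbx, t
# values in %rax/%r8..%r11, scratch in %rbp, lane s[0] in %rcx;
# %rsi and %rdx are left untouched so they survive across blocks.
# In Round 0 the state is still in natural layout, so the column
# parities below read consecutive offsets.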
xorq -56(%rdi), %r12
xorq -48(%rdi), %r13
xorq -40(%rdi), %r14
xorq -32(%rdi), %r15
xorq -24(%rdi), %rbx
xorq -16(%rdi), %r12
xorq -8(%rdi), %r13
xorq (%rdi), %r14
xorq 8(%rdi), %r15
xorq 16(%rdi), %rbx
xorq 24(%rdi), %r12
xorq 32(%rdi), %r13
xorq 40(%rdi), %r14
xorq 48(%rdi), %r15
xorq 56(%rdi), %rbx
xorq 64(%rdi), %r12
xorq 72(%rdi), %r13
xorq 80(%rdi), %r14
xorq 88(%rdi), %r15
xorq 96(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -48(%rdi), %r13
movq (%rdi), %r14
movq 48(%rdi), %r15
movq 96(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, (%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 96(%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x01, %rcx
# Row 1
movq -72(%rdi), %r12
movq -24(%rdi), %r13
movq -16(%rdi), %r14
movq 32(%rdi), %r15
movq 80(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -72(%rdi)
# Row 2
movq -88(%rdi), %r12
movq -40(%rdi), %r13
movq 8(%rdi), %r14
movq 56(%rdi), %r15
movq 64(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -88(%rdi)
# Row 3
movq -64(%rdi), %r12
movq -56(%rdi), %r13
movq -8(%rdi), %r14
movq 40(%rdi), %r15
movq 88(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -64(%rdi)
# Row 4
xorq -80(%rdi), %r9
xorq -32(%rdi), %r10
xorq 16(%rdi), %r11
xorq 24(%rdi), %rax
xorq 72(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -80(%rdi)
movq %r13, -32(%rdi)
movq %r14, 16(%rdi)
movq %r15, 24(%rdi)
movq %rbx, 72(%rdi)
# Round 1
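# From Round 1 onward pi is realized purely through the load/store
# offsets, so each unrolled round carries its own index map; that
# lane permutation has order 24, which is what lets the 24-round
# unroll finish with the state back in natural order.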
xorq %rcx, %r12
xorq -88(%rdi), %r12
xorq -72(%rdi), %r12
xorq -64(%rdi), %r12
xorq -56(%rdi), %r13
xorq -48(%rdi), %r13
xorq -40(%rdi), %r13
xorq -24(%rdi), %r13
xorq -16(%rdi), %r14
xorq -8(%rdi), %r14
xorq (%rdi), %r14
xorq 8(%rdi), %r14
xorq 32(%rdi), %r15
xorq 40(%rdi), %r15
xorq 48(%rdi), %r15
xorq 56(%rdi), %r15
xorq 64(%rdi), %rbx
xorq 80(%rdi), %rbx
xorq 88(%rdi), %rbx
xorq 96(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -24(%rdi), %r13
movq 8(%rdi), %r14
movq 40(%rdi), %r15
movq 72(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 72(%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x8082, %rcx
# Row 1
movq 48(%rdi), %r12
movq 80(%rdi), %r13
movq -88(%rdi), %r14
movq -56(%rdi), %r15
movq 16(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 48(%rdi)
# Row 2
movq -48(%rdi), %r12
movq -16(%rdi), %r13
movq 56(%rdi), %r14
movq 88(%rdi), %r15
movq -80(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -48(%rdi)
# Row 3
movq 96(%rdi), %r12
movq -72(%rdi), %r13
movq -40(%rdi), %r14
movq -8(%rdi), %r15
movq 24(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 96(%rdi)
# Row 4
xorq (%rdi), %r9
xorq 32(%rdi), %r10
xorq 64(%rdi), %r11
xorq -64(%rdi), %rax
xorq -32(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, (%rdi)
movq %r13, 32(%rdi)
movq %r14, 64(%rdi)
movq %r15, -64(%rdi)
movq %rbx, -32(%rdi)
# Round 2
xorq %rcx, %r12
xorq -88(%rdi), %r14
xorq -80(%rdi), %rbx
xorq -72(%rdi), %r13
xorq -56(%rdi), %r15
xorq -48(%rdi), %r12
xorq -40(%rdi), %r14
xorq -24(%rdi), %r13
xorq -16(%rdi), %r13
xorq -8(%rdi), %r15
xorq 8(%rdi), %r14
xorq 16(%rdi), %rbx
xorq 24(%rdi), %rbx
xorq 40(%rdi), %r15
xorq 48(%rdi), %r12
xorq 56(%rdi), %r14
xorq 72(%rdi), %rbx
xorq 80(%rdi), %r13
xorq 88(%rdi), %r15
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 80(%rdi), %r13
movq 56(%rdi), %r14
movq -8(%rdi), %r15
movq -32(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -32(%rdi)
movq $0x800000000000808a, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 40(%rdi), %r12
movq 16(%rdi), %r13
movq -48(%rdi), %r14
movq -72(%rdi), %r15
movq 64(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 40(%rdi)
# Row 2
movq -24(%rdi), %r12
movq -88(%rdi), %r13
movq 88(%rdi), %r14
movq 24(%rdi), %r15
movq (%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, (%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -24(%rdi)
# Row 3
movq 72(%rdi), %r12
movq 48(%rdi), %r13
movq -16(%rdi), %r14
movq -40(%rdi), %r15
movq -64(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 72(%rdi)
# Row 4
xorq 8(%rdi), %r9
xorq -56(%rdi), %r10
xorq -80(%rdi), %r11
xorq 96(%rdi), %rax
xorq 32(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 8(%rdi)
movq %r13, -56(%rdi)
movq %r14, -80(%rdi)
movq %r15, 96(%rdi)
movq %rbx, 32(%rdi)
# Round 3
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq -48(%rdi), %r14
xorq -40(%rdi), %r15
xorq -32(%rdi), %rbx
xorq -24(%rdi), %r12
xorq -16(%rdi), %r14
xorq -8(%rdi), %r15
xorq (%rdi), %rbx
xorq 16(%rdi), %r13
xorq 24(%rdi), %r15
xorq 40(%rdi), %r12
xorq 48(%rdi), %r13
xorq 56(%rdi), %r14
xorq 64(%rdi), %rbx
xorq 72(%rdi), %r12
xorq 80(%rdi), %r13
xorq 88(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 16(%rdi), %r13
movq 88(%rdi), %r14
movq -40(%rdi), %r15
movq 32(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 32(%rdi)
movq $0x8000000080008000, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -8(%rdi), %r12
movq 64(%rdi), %r13
movq -24(%rdi), %r14
movq 48(%rdi), %r15
movq -80(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -8(%rdi)
# Row 2
movq 80(%rdi), %r12
movq -48(%rdi), %r13
movq 24(%rdi), %r14
movq -64(%rdi), %r15
movq 8(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 80(%rdi)
# Row 3
movq -32(%rdi), %r12
movq 40(%rdi), %r13
movq -88(%rdi), %r14
movq -16(%rdi), %r15
movq 96(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 96(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -32(%rdi)
# Row 4
xorq 56(%rdi), %r9
xorq -72(%rdi), %r10
xorq (%rdi), %r11
xorq 72(%rdi), %rax
xorq -56(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 56(%rdi)
movq %r13, -72(%rdi)
movq %r14, (%rdi)
movq %r15, 72(%rdi)
movq %rbx, -56(%rdi)
# Round 4
xorq %rcx, %r12
xorq -88(%rdi), %r14
xorq -80(%rdi), %rbx
xorq -64(%rdi), %r15
xorq -48(%rdi), %r13
xorq -40(%rdi), %r15
xorq -32(%rdi), %r12
xorq -24(%rdi), %r14
xorq -16(%rdi), %r15
xorq -8(%rdi), %r12
xorq 8(%rdi), %rbx
xorq 16(%rdi), %r13
xorq 24(%rdi), %r14
xorq 32(%rdi), %rbx
xorq 40(%rdi), %r13
xorq 48(%rdi), %r15
xorq 64(%rdi), %r13
xorq 80(%rdi), %r12
xorq 88(%rdi), %r14
xorq 96(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 64(%rdi), %r13
movq 24(%rdi), %r14
movq -16(%rdi), %r15
movq -56(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -56(%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x808b, %rcx
# Row 1
movq -40(%rdi), %r12
movq -80(%rdi), %r13
movq 80(%rdi), %r14
movq 40(%rdi), %r15
movq (%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, (%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -40(%rdi)
# Row 2
movq 16(%rdi), %r12
movq -24(%rdi), %r13
movq -64(%rdi), %r14
movq 96(%rdi), %r15
movq 56(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 96(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 16(%rdi)
# Row 3
movq 32(%rdi), %r12
movq -8(%rdi), %r13
movq -48(%rdi), %r14
movq -88(%rdi), %r15
movq 72(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 32(%rdi)
# Row 4
xorq 88(%rdi), %r9
xorq 48(%rdi), %r10
xorq 8(%rdi), %r11
xorq -32(%rdi), %rax
xorq -72(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 88(%rdi)
movq %r13, 48(%rdi)
movq %r14, 8(%rdi)
movq %r15, -32(%rdi)
movq %rbx, -72(%rdi)
# Round 5
xorq %rcx, %r12
xorq -88(%rdi), %r15
xorq -80(%rdi), %r13
xorq -64(%rdi), %r14
xorq -56(%rdi), %rbx
xorq -48(%rdi), %r14
xorq -40(%rdi), %r12
xorq -24(%rdi), %r13
xorq -16(%rdi), %r15
xorq -8(%rdi), %r13
xorq (%rdi), %rbx
xorq 16(%rdi), %r12
xorq 24(%rdi), %r14
xorq 32(%rdi), %r12
xorq 40(%rdi), %r15
xorq 56(%rdi), %rbx
xorq 64(%rdi), %r13
xorq 72(%rdi), %rbx
xorq 80(%rdi), %r14
xorq 96(%rdi), %r15
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -80(%rdi), %r13
movq -64(%rdi), %r14
movq -88(%rdi), %r15
movq -72(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -72(%rdi)
movq $0x80000001, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -16(%rdi), %r12
movq (%rdi), %r13
movq 16(%rdi), %r14
movq -8(%rdi), %r15
movq 8(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, (%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -16(%rdi)
# Row 2
movq 64(%rdi), %r12
movq 80(%rdi), %r13
movq 96(%rdi), %r14
movq 72(%rdi), %r15
movq 88(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 96(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 64(%rdi)
# Row 3
movq -56(%rdi), %r12
movq -40(%rdi), %r13
movq -24(%rdi), %r14
movq -48(%rdi), %r15
movq -32(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -56(%rdi)
# Row 4
xorq 24(%rdi), %r9
xorq 40(%rdi), %r10
xorq 56(%rdi), %r11
xorq 32(%rdi), %rax
xorq 48(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 24(%rdi)
movq %r13, 40(%rdi)
movq %r14, 56(%rdi)
movq %r15, 32(%rdi)
movq %rbx, 48(%rdi)
# Round 6
xorq %rcx, %r12
xorq -88(%rdi), %r15
xorq -80(%rdi), %r13
xorq -72(%rdi), %rbx
xorq -64(%rdi), %r14
xorq -56(%rdi), %r12
xorq -48(%rdi), %r15
xorq -40(%rdi), %r13
xorq -32(%rdi), %rbx
xorq -24(%rdi), %r14
xorq -16(%rdi), %r12
xorq -8(%rdi), %r15
xorq (%rdi), %r13
xorq 8(%rdi), %rbx
xorq 16(%rdi), %r14
xorq 64(%rdi), %r12
xorq 72(%rdi), %r15
xorq 80(%rdi), %r13
xorq 88(%rdi), %rbx
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq (%rdi), %r13
movq 96(%rdi), %r14
movq -48(%rdi), %r15
movq 48(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, (%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 96(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 48(%rdi)
movq $0x8000000080008081, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -88(%rdi), %r12
movq 8(%rdi), %r13
movq 64(%rdi), %r14
movq -40(%rdi), %r15
movq 56(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -88(%rdi)
# Row 2
movq -80(%rdi), %r12
movq 16(%rdi), %r13
movq 72(%rdi), %r14
movq -32(%rdi), %r15
movq 24(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -80(%rdi)
# Row 3
movq -72(%rdi), %r12
movq -16(%rdi), %r13
movq 80(%rdi), %r14
movq -24(%rdi), %r15
movq 32(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -72(%rdi)
# Row 4
xorq -64(%rdi), %r9
xorq -8(%rdi), %r10
xorq 88(%rdi), %r11
xorq -56(%rdi), %rax
xorq 40(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -64(%rdi)
movq %r13, -8(%rdi)
movq %r14, 88(%rdi)
movq %r15, -56(%rdi)
movq %rbx, 40(%rdi)
# Round 7
xorq %rcx, %r12
xorq -88(%rdi), %r12
xorq -80(%rdi), %r12
xorq -72(%rdi), %r12
xorq -48(%rdi), %r15
xorq -40(%rdi), %r15
xorq -32(%rdi), %r15
xorq -24(%rdi), %r15
xorq -16(%rdi), %r13
xorq (%rdi), %r13
xorq 8(%rdi), %r13
xorq 16(%rdi), %r13
xorq 24(%rdi), %rbx
xorq 32(%rdi), %rbx
xorq 48(%rdi), %rbx
xorq 56(%rdi), %rbx
xorq 64(%rdi), %r14
xorq 72(%rdi), %r14
xorq 80(%rdi), %r14
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 8(%rdi), %r13
movq 72(%rdi), %r14
movq -24(%rdi), %r15
movq 40(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 40(%rdi)
movq $0x8000000000008009, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -48(%rdi), %r12
movq 56(%rdi), %r13
movq -80(%rdi), %r14
movq -16(%rdi), %r15
movq 88(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -48(%rdi)
# Row 2
movq (%rdi), %r12
movq 64(%rdi), %r13
movq -32(%rdi), %r14
movq 32(%rdi), %r15
movq -64(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, (%rdi)
# Row 3
movq 48(%rdi), %r12
movq -88(%rdi), %r13
movq 16(%rdi), %r14
movq 80(%rdi), %r15
movq -56(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 48(%rdi)
# Row 4
xorq 96(%rdi), %r9
xorq -40(%rdi), %r10
xorq 24(%rdi), %r11
xorq -72(%rdi), %rax
xorq -8(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 96(%rdi)
movq %r13, -40(%rdi)
movq %r14, 24(%rdi)
movq %r15, -72(%rdi)
movq %rbx, -8(%rdi)
# Round 8
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -64(%rdi), %rbx
xorq -56(%rdi), %rbx
xorq -48(%rdi), %r12
xorq -32(%rdi), %r14
xorq -24(%rdi), %r15
xorq -16(%rdi), %r15
xorq (%rdi), %r12
xorq 8(%rdi), %r13
xorq 16(%rdi), %r14
xorq 32(%rdi), %r15
xorq 40(%rdi), %rbx
xorq 48(%rdi), %r12
xorq 56(%rdi), %r13
xorq 64(%rdi), %r13
xorq 72(%rdi), %r14
xorq 80(%rdi), %r15
xorq 88(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 56(%rdi), %r13
movq -32(%rdi), %r14
movq 80(%rdi), %r15
movq -8(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -8(%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x8a, %rcx
# Row 1
movq -24(%rdi), %r12
movq 88(%rdi), %r13
movq (%rdi), %r14
movq -88(%rdi), %r15
movq 24(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, (%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -24(%rdi)
# Row 2
movq 8(%rdi), %r12
movq -80(%rdi), %r13
movq 32(%rdi), %r14
movq -56(%rdi), %r15
movq 96(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 96(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 8(%rdi)
# Row 3
movq 40(%rdi), %r12
movq -48(%rdi), %r13
movq 64(%rdi), %r14
movq 16(%rdi), %r15
movq -72(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 40(%rdi)
# Row 4
xorq 72(%rdi), %r9
xorq -16(%rdi), %r10
xorq -64(%rdi), %r11
xorq 48(%rdi), %rax
xorq -40(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 72(%rdi)
movq %r13, -16(%rdi)
movq %r14, -64(%rdi)
movq %r15, 48(%rdi)
movq %rbx, -40(%rdi)
# Round 9
xorq %rcx, %r12
xorq -88(%rdi), %r15
xorq -80(%rdi), %r13
xorq -72(%rdi), %rbx
xorq -56(%rdi), %r15
xorq -48(%rdi), %r13
xorq -32(%rdi), %r14
xorq -24(%rdi), %r12
xorq -8(%rdi), %rbx
xorq (%rdi), %r14
xorq 8(%rdi), %r12
xorq 16(%rdi), %r15
xorq 24(%rdi), %rbx
xorq 32(%rdi), %r14
xorq 40(%rdi), %r12
xorq 56(%rdi), %r13
xorq 64(%rdi), %r14
xorq 80(%rdi), %r15
xorq 88(%rdi), %r13
xorq 96(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 88(%rdi), %r13
movq 32(%rdi), %r14
movq 16(%rdi), %r15
movq -40(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -40(%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x88, %rcx
# Row 1
movq 80(%rdi), %r12
movq 24(%rdi), %r13
movq 8(%rdi), %r14
movq -48(%rdi), %r15
movq -64(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 80(%rdi)
# Row 2
movq 56(%rdi), %r12
movq (%rdi), %r13
movq -56(%rdi), %r14
movq -72(%rdi), %r15
movq 72(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, (%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 56(%rdi)
# Row 3
movq -8(%rdi), %r12
movq -24(%rdi), %r13
movq -80(%rdi), %r14
movq 64(%rdi), %r15
movq 48(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -8(%rdi)
# Row 4
xorq -32(%rdi), %r9
xorq -88(%rdi), %r10
xorq 96(%rdi), %r11
xorq 40(%rdi), %rax
xorq -16(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -32(%rdi)
movq %r13, -88(%rdi)
movq %r14, 96(%rdi)
movq %r15, 40(%rdi)
movq %rbx, -16(%rdi)
# Round 10
xorq %rcx, %r12
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -64(%rdi), %rbx
xorq -56(%rdi), %r14
xorq -48(%rdi), %r15
xorq -40(%rdi), %rbx
xorq -24(%rdi), %r13
xorq -8(%rdi), %r12
xorq (%rdi), %r13
xorq 8(%rdi), %r14
xorq 16(%rdi), %r15
xorq 24(%rdi), %r13
xorq 32(%rdi), %r14
xorq 48(%rdi), %rbx
xorq 56(%rdi), %r12
xorq 64(%rdi), %r15
xorq 72(%rdi), %rbx
xorq 80(%rdi), %r12
xorq 88(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 24(%rdi), %r13
movq -56(%rdi), %r14
movq 64(%rdi), %r15
movq -16(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -16(%rdi)
movq $0x80008009, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 16(%rdi), %r12
movq -64(%rdi), %r13
movq 56(%rdi), %r14
movq -24(%rdi), %r15
movq 96(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 96(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 16(%rdi)
# Row 2
movq 88(%rdi), %r12
movq 8(%rdi), %r13
movq -72(%rdi), %r14
movq 48(%rdi), %r15
movq -32(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 88(%rdi)
# Row 3
movq -40(%rdi), %r12
movq 80(%rdi), %r13
movq (%rdi), %r14
movq -80(%rdi), %r15
movq 40(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, (%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -40(%rdi)
# Row 4
xorq 32(%rdi), %r9
xorq -48(%rdi), %r10
xorq 72(%rdi), %r11
xorq -8(%rdi), %rax
xorq -88(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 32(%rdi)
movq %r13, -48(%rdi)
movq %r14, 72(%rdi)
movq %r15, -8(%rdi)
movq %rbx, -88(%rdi)
# Round 11
xorq %rcx, %r12
xorq -80(%rdi), %r15
xorq -72(%rdi), %r14
xorq -64(%rdi), %r13
xorq -56(%rdi), %r14
xorq -40(%rdi), %r12
xorq -32(%rdi), %rbx
xorq -24(%rdi), %r15
xorq -16(%rdi), %rbx
xorq (%rdi), %r14
xorq 8(%rdi), %r13
xorq 16(%rdi), %r12
xorq 24(%rdi), %r13
xorq 40(%rdi), %rbx
xorq 48(%rdi), %r15
xorq 56(%rdi), %r14
xorq 64(%rdi), %r15
xorq 80(%rdi), %r13
xorq 88(%rdi), %r12
xorq 96(%rdi), %rbx
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -64(%rdi), %r13
movq -72(%rdi), %r14
movq -80(%rdi), %r15
movq -88(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -88(%rdi)
movq $0x8000000a, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 64(%rdi), %r12
movq 96(%rdi), %r13
movq 88(%rdi), %r14
movq 80(%rdi), %r15
movq 72(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 96(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 64(%rdi)
# Row 2
movq 24(%rdi), %r12
movq 56(%rdi), %r13
movq 48(%rdi), %r14
movq 40(%rdi), %r15
movq 32(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 24(%rdi)
# Row 3
movq -16(%rdi), %r12
movq 16(%rdi), %r13
movq 8(%rdi), %r14
movq (%rdi), %r15
movq -8(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, (%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -16(%rdi)
# Row 4
xorq -56(%rdi), %r9
xorq -24(%rdi), %r10
xorq -32(%rdi), %r11
xorq -40(%rdi), %rax
xorq -48(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -56(%rdi)
movq %r13, -24(%rdi)
movq %r14, -32(%rdi)
movq %r15, -40(%rdi)
movq %rbx, -48(%rdi)
# Round 12
xorq %rcx, %r12
xorq -88(%rdi), %rbx
xorq -80(%rdi), %r15
xorq -72(%rdi), %r14
xorq -64(%rdi), %r13
xorq -16(%rdi), %r12
xorq -8(%rdi), %rbx
xorq (%rdi), %r15
xorq 8(%rdi), %r14
xorq 16(%rdi), %r13
xorq 24(%rdi), %r12
xorq 32(%rdi), %rbx
xorq 40(%rdi), %r15
xorq 48(%rdi), %r14
xorq 56(%rdi), %r13
xorq 64(%rdi), %r12
xorq 72(%rdi), %rbx
xorq 80(%rdi), %r15
xorq 88(%rdi), %r14
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 96(%rdi), %r13
movq 48(%rdi), %r14
movq (%rdi), %r15
movq -48(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 96(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, (%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -48(%rdi)
movq $0x8000808b, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -80(%rdi), %r12
movq 72(%rdi), %r13
movq 24(%rdi), %r14
movq 16(%rdi), %r15
movq -32(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -80(%rdi)
# Row 2
movq -64(%rdi), %r12
movq 88(%rdi), %r13
movq 40(%rdi), %r14
movq -8(%rdi), %r15
movq -56(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -64(%rdi)
# Row 3
movq -88(%rdi), %r12
movq 64(%rdi), %r13
movq 56(%rdi), %r14
movq 8(%rdi), %r15
movq -40(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -88(%rdi)
# Row 4
xorq -72(%rdi), %r9
xorq 80(%rdi), %r10
xorq 32(%rdi), %r11
xorq -16(%rdi), %rax
xorq -24(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -72(%rdi)
movq %r13, 80(%rdi)
movq %r14, 32(%rdi)
movq %r15, -16(%rdi)
movq %rbx, -24(%rdi)
# Round 13
xorq %rcx, %r12
xorq -88(%rdi), %r12
xorq -80(%rdi), %r12
xorq -64(%rdi), %r12
xorq -56(%rdi), %rbx
xorq -48(%rdi), %rbx
xorq -40(%rdi), %rbx
xorq -32(%rdi), %rbx
xorq -8(%rdi), %r15
xorq (%rdi), %r15
xorq 8(%rdi), %r15
xorq 16(%rdi), %r15
xorq 24(%rdi), %r14
xorq 40(%rdi), %r14
xorq 48(%rdi), %r14
xorq 56(%rdi), %r14
xorq 64(%rdi), %r13
xorq 72(%rdi), %r13
xorq 88(%rdi), %r13
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 72(%rdi), %r13
movq 40(%rdi), %r14
movq 8(%rdi), %r15
movq -24(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -24(%rdi)
movq $0x800000000000008b, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq (%rdi), %r12
movq -32(%rdi), %r13
movq -64(%rdi), %r14
movq 64(%rdi), %r15
movq 32(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 32(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, (%rdi)
# Row 2
movq 96(%rdi), %r12
movq 24(%rdi), %r13
movq -8(%rdi), %r14
movq -40(%rdi), %r15
movq -72(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -40(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 96(%rdi)
# Row 3
movq -48(%rdi), %r12
movq -80(%rdi), %r13
movq 88(%rdi), %r14
movq 56(%rdi), %r15
movq -16(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -80(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -48(%rdi)
# Row 4
xorq 48(%rdi), %r9
xorq 16(%rdi), %r10
xorq -56(%rdi), %r11
xorq -88(%rdi), %rax
xorq 80(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 48(%rdi)
movq %r13, 16(%rdi)
movq %r14, -56(%rdi)
movq %r15, -88(%rdi)
movq %rbx, 80(%rdi)
# Round 14
xorq %rcx, %r12
xorq -80(%rdi), %r13
xorq -72(%rdi), %rbx
xorq -64(%rdi), %r14
xorq -48(%rdi), %r12
xorq -40(%rdi), %r15
xorq -32(%rdi), %r13
xorq -24(%rdi), %rbx
xorq -16(%rdi), %rbx
xorq -8(%rdi), %r14
xorq (%rdi), %r12
xorq 8(%rdi), %r15
xorq 24(%rdi), %r13
xorq 32(%rdi), %rbx
xorq 40(%rdi), %r14
xorq 56(%rdi), %r15
xorq 64(%rdi), %r15
xorq 72(%rdi), %r13
xorq 88(%rdi), %r14
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -32(%rdi), %r13
movq -8(%rdi), %r14
movq 56(%rdi), %r15
movq 80(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 80(%rdi)
movq $0x8000000000008089, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 8(%rdi), %r12
movq 32(%rdi), %r13
movq 96(%rdi), %r14
movq -80(%rdi), %r15
movq -56(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 96(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 8(%rdi)
# Row 2
movq 72(%rdi), %r12
movq -64(%rdi), %r13
movq -40(%rdi), %r14
movq -16(%rdi), %r15
movq 48(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 72(%rdi)
# Row 3
movq -24(%rdi), %r12
movq (%rdi), %r13
movq 24(%rdi), %r14
movq 88(%rdi), %r15
movq -88(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, (%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -24(%rdi)
# Row 4
xorq 40(%rdi), %r9
xorq 64(%rdi), %r10
xorq -72(%rdi), %r11
xorq -48(%rdi), %rax
xorq 16(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 40(%rdi)
movq %r13, 64(%rdi)
movq %r14, -72(%rdi)
movq %r15, -48(%rdi)
movq %rbx, 16(%rdi)
# Round 15
xorq %rcx, %r12
xorq -88(%rdi), %rbx
xorq -80(%rdi), %r15
xorq -64(%rdi), %r13
xorq -56(%rdi), %rbx
xorq -40(%rdi), %r14
xorq -32(%rdi), %r13
xorq -24(%rdi), %r12
xorq -16(%rdi), %r15
xorq -8(%rdi), %r14
xorq (%rdi), %r13
xorq 8(%rdi), %r12
xorq 24(%rdi), %r14
xorq 32(%rdi), %r13
xorq 48(%rdi), %rbx
xorq 56(%rdi), %r15
xorq 72(%rdi), %r12
xorq 80(%rdi), %rbx
xorq 88(%rdi), %r15
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 32(%rdi), %r13
movq -40(%rdi), %r14
movq 88(%rdi), %r15
movq 16(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 16(%rdi)
movq $0x8000000000008003, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 56(%rdi), %r12
movq -56(%rdi), %r13
movq 72(%rdi), %r14
movq (%rdi), %r15
movq -72(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, (%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -72(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 56(%rdi)
# Row 2
movq -32(%rdi), %r12
movq 96(%rdi), %r13
movq -16(%rdi), %r14
movq -88(%rdi), %r15
movq 40(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 96(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -32(%rdi)
# Row 3
movq 80(%rdi), %r12
movq 8(%rdi), %r13
movq -64(%rdi), %r14
movq 24(%rdi), %r15
movq -48(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 80(%rdi)
# Row 4
xorq -8(%rdi), %r9
xorq -80(%rdi), %r10
xorq 48(%rdi), %r11
xorq -24(%rdi), %rax
xorq 64(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -8(%rdi)
movq %r13, -80(%rdi)
movq %r14, 48(%rdi)
movq %r15, -24(%rdi)
movq %rbx, 64(%rdi)
# Round 16
xorq %rcx, %r12
xorq -88(%rdi), %r15
xorq -72(%rdi), %rbx
xorq -64(%rdi), %r14
xorq -56(%rdi), %r13
xorq -48(%rdi), %rbx
xorq -40(%rdi), %r14
xorq -32(%rdi), %r12
xorq -16(%rdi), %r14
xorq (%rdi), %r15
xorq 8(%rdi), %r13
xorq 16(%rdi), %rbx
xorq 24(%rdi), %r15
xorq 32(%rdi), %r13
xorq 40(%rdi), %rbx
xorq 56(%rdi), %r12
xorq 72(%rdi), %r14
xorq 80(%rdi), %r12
xorq 88(%rdi), %r15
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -56(%rdi), %r13
movq -16(%rdi), %r14
movq 24(%rdi), %r15
movq 64(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 64(%rdi)
movq $0x8000000000008002, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 88(%rdi), %r12
movq -72(%rdi), %r13
movq -32(%rdi), %r14
movq 8(%rdi), %r15
movq 48(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 88(%rdi)
# Row 2
movq 32(%rdi), %r12
movq 72(%rdi), %r13
movq -88(%rdi), %r14
movq -48(%rdi), %r15
movq -8(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 32(%rdi)
# Row 3
movq 16(%rdi), %r12
movq 56(%rdi), %r13
movq 96(%rdi), %r14
movq -64(%rdi), %r15
movq -24(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 96(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 16(%rdi)
# Row 4
xorq -40(%rdi), %r9
xorq (%rdi), %r10
xorq 40(%rdi), %r11
xorq 80(%rdi), %rax
xorq -80(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -40(%rdi)
movq %r13, (%rdi)
movq %r14, 40(%rdi)
movq %r15, 80(%rdi)
movq %rbx, -80(%rdi)
# Round 17
xorq %rcx, %r12
xorq -88(%rdi), %r14
xorq -72(%rdi), %r13
xorq -64(%rdi), %r15
xorq -56(%rdi), %r13
xorq -48(%rdi), %r15
xorq -32(%rdi), %r14
xorq -24(%rdi), %rbx
xorq -16(%rdi), %r14
xorq -8(%rdi), %rbx
xorq 8(%rdi), %r15
xorq 16(%rdi), %r12
xorq 24(%rdi), %r15
xorq 32(%rdi), %r12
xorq 48(%rdi), %rbx
xorq 56(%rdi), %r13
xorq 64(%rdi), %rbx
xorq 72(%rdi), %r13
xorq 88(%rdi), %r12
xorq 96(%rdi), %r14
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -72(%rdi), %r13
movq -88(%rdi), %r14
movq -64(%rdi), %r15
movq -80(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -88(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -80(%rdi)
movq $0x8000000000000080, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 24(%rdi), %r12
movq 48(%rdi), %r13
movq 32(%rdi), %r14
movq 56(%rdi), %r15
movq 40(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 24(%rdi)
# Row 2
movq -56(%rdi), %r12
movq -32(%rdi), %r13
movq -48(%rdi), %r14
movq -24(%rdi), %r15
movq -40(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -56(%rdi)
# Row 3
movq 64(%rdi), %r12
movq 88(%rdi), %r13
movq 72(%rdi), %r14
movq 96(%rdi), %r15
movq 80(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 96(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 64(%rdi)
# Row 4
xorq -16(%rdi), %r9
xorq 8(%rdi), %r10
xorq -8(%rdi), %r11
xorq 16(%rdi), %rax
xorq (%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -16(%rdi)
movq %r13, 8(%rdi)
movq %r14, -8(%rdi)
movq %r15, 16(%rdi)
movq %rbx, (%rdi)
# Round 18
xorq %rcx, %r12
xorq -88(%rdi), %r14
xorq -80(%rdi), %rbx
xorq -72(%rdi), %r13
xorq -64(%rdi), %r15
xorq -56(%rdi), %r12
xorq -48(%rdi), %r14
xorq -40(%rdi), %rbx
xorq -32(%rdi), %r13
xorq -24(%rdi), %r15
xorq 24(%rdi), %r12
xorq 32(%rdi), %r14
xorq 40(%rdi), %rbx
xorq 48(%rdi), %r13
xorq 56(%rdi), %r15
xorq 64(%rdi), %r12
xorq 72(%rdi), %r14
xorq 80(%rdi), %rbx
xorq 88(%rdi), %r13
xorq 96(%rdi), %r15
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 48(%rdi), %r13
movq -48(%rdi), %r14
movq 96(%rdi), %r15
movq (%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 96(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, (%rdi)
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq $0x800a, %rcx
# Row 1
movq -64(%rdi), %r12
movq 40(%rdi), %r13
movq -56(%rdi), %r14
movq 88(%rdi), %r15
movq -8(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 88(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -64(%rdi)
# Row 2
movq -72(%rdi), %r12
movq 32(%rdi), %r13
movq -24(%rdi), %r14
movq 80(%rdi), %r15
movq -16(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -72(%rdi)
# Row 3
movq -80(%rdi), %r12
movq 24(%rdi), %r13
movq -32(%rdi), %r14
movq 72(%rdi), %r15
movq 16(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 24(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -80(%rdi)
# Row 4
xorq -88(%rdi), %r9
xorq 56(%rdi), %r10
xorq -40(%rdi), %r11
xorq 64(%rdi), %rax
xorq 8(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -88(%rdi)
movq %r13, 56(%rdi)
movq %r14, -40(%rdi)
movq %r15, 64(%rdi)
movq %rbx, 8(%rdi)
# Round 19
xorq %rcx, %r12
xorq -80(%rdi), %r12
xorq -72(%rdi), %r12
xorq -64(%rdi), %r12
xorq -56(%rdi), %r14
xorq -48(%rdi), %r14
xorq -32(%rdi), %r14
xorq -24(%rdi), %r14
xorq -16(%rdi), %rbx
xorq -8(%rdi), %rbx
xorq (%rdi), %rbx
xorq 16(%rdi), %rbx
xorq 24(%rdi), %r13
xorq 32(%rdi), %r13
xorq 40(%rdi), %r13
xorq 48(%rdi), %r13
xorq 72(%rdi), %r15
xorq 80(%rdi), %r15
xorq 88(%rdi), %r15
xorq 96(%rdi), %r15
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq 40(%rdi), %r13
movq -24(%rdi), %r14
movq 72(%rdi), %r15
movq 8(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -24(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 8(%rdi)
movq $0x800000008000000a, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 96(%rdi), %r12
movq -8(%rdi), %r13
movq -72(%rdi), %r14
movq 24(%rdi), %r15
movq -40(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 24(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -40(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 96(%rdi)
# Row 2
movq 48(%rdi), %r12
movq -56(%rdi), %r13
movq 80(%rdi), %r14
movq 16(%rdi), %r15
movq -88(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -56(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 16(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 48(%rdi)
# Row 3
movq (%rdi), %r12
movq -64(%rdi), %r13
movq 32(%rdi), %r14
movq -32(%rdi), %r15
movq 64(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -64(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 32(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 64(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, (%rdi)
# Row 4
xorq -48(%rdi), %r9
xorq 88(%rdi), %r10
xorq -16(%rdi), %r11
xorq -80(%rdi), %rax
xorq 56(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -48(%rdi)
movq %r13, 88(%rdi)
movq %r14, -16(%rdi)
movq %r15, -80(%rdi)
movq %rbx, 56(%rdi)
# Round 20
xorq %rcx, %r12
xorq -88(%rdi), %rbx
xorq -72(%rdi), %r14
xorq -64(%rdi), %r13
xorq -56(%rdi), %r13
xorq -40(%rdi), %rbx
xorq -32(%rdi), %r15
xorq -24(%rdi), %r14
xorq -8(%rdi), %r13
xorq (%rdi), %r12
xorq 8(%rdi), %rbx
xorq 16(%rdi), %r15
xorq 24(%rdi), %r15
xorq 32(%rdi), %r14
xorq 40(%rdi), %r13
xorq 48(%rdi), %r12
xorq 64(%rdi), %rbx
xorq 72(%rdi), %r15
xorq 80(%rdi), %r14
xorq 96(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -8(%rdi), %r13
movq 80(%rdi), %r14
movq -32(%rdi), %r15
movq 56(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 56(%rdi)
movq $0x8000000080008081, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 72(%rdi), %r12
movq -40(%rdi), %r13
movq 48(%rdi), %r14
movq -64(%rdi), %r15
movq -16(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 72(%rdi)
# Row 2
movq 40(%rdi), %r12
movq -72(%rdi), %r13
movq 16(%rdi), %r14
movq 64(%rdi), %r15
movq -48(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 64(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 40(%rdi)
# Row 3
movq 8(%rdi), %r12
movq 96(%rdi), %r13
movq -56(%rdi), %r14
movq 32(%rdi), %r15
movq -80(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 96(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -56(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 8(%rdi)
# Row 4
xorq -24(%rdi), %r9
xorq 24(%rdi), %r10
xorq -88(%rdi), %r11
xorq (%rdi), %rax
xorq 88(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, -24(%rdi)
movq %r13, 24(%rdi)
movq %r14, -88(%rdi)
movq %r15, (%rdi)
movq %rbx, 88(%rdi)
# Round 21
xorq %rcx, %r12
xorq -80(%rdi), %rbx
xorq -72(%rdi), %r13
xorq -64(%rdi), %r15
xorq -56(%rdi), %r14
xorq -48(%rdi), %rbx
xorq -40(%rdi), %r13
xorq -32(%rdi), %r15
xorq -16(%rdi), %rbx
xorq -8(%rdi), %r13
xorq 8(%rdi), %r12
xorq 16(%rdi), %r14
xorq 32(%rdi), %r15
xorq 40(%rdi), %r12
xorq 48(%rdi), %r14
xorq 56(%rdi), %rbx
xorq 64(%rdi), %r15
xorq 72(%rdi), %r12
xorq 80(%rdi), %r14
xorq 96(%rdi), %r13
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -40(%rdi), %r13
movq 16(%rdi), %r14
movq 32(%rdi), %r15
movq 88(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 16(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 88(%rdi)
movq $0x8000000000008080, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -32(%rdi), %r12
movq -16(%rdi), %r13
movq 40(%rdi), %r14
movq 96(%rdi), %r15
movq -88(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 96(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -88(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -32(%rdi)
# Row 2
movq -8(%rdi), %r12
movq 48(%rdi), %r13
movq 64(%rdi), %r14
movq -80(%rdi), %r15
movq -24(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -80(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -8(%rdi)
# Row 3
movq 56(%rdi), %r12
movq 72(%rdi), %r13
movq -72(%rdi), %r14
movq -56(%rdi), %r15
movq (%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 72(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -72(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, (%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 56(%rdi)
# Row 4
xorq 80(%rdi), %r9
xorq -64(%rdi), %r10
xorq -48(%rdi), %r11
xorq 8(%rdi), %rax
xorq 24(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 80(%rdi)
movq %r13, -64(%rdi)
movq %r14, -48(%rdi)
movq %r15, 8(%rdi)
movq %rbx, 24(%rdi)
# Round 22
xorq %rcx, %r12
xorq -88(%rdi), %rbx
xorq -80(%rdi), %r15
xorq -72(%rdi), %r14
xorq -56(%rdi), %r15
xorq -40(%rdi), %r13
xorq -32(%rdi), %r12
xorq -24(%rdi), %rbx
xorq -16(%rdi), %r13
xorq -8(%rdi), %r12
xorq (%rdi), %rbx
xorq 16(%rdi), %r14
xorq 32(%rdi), %r15
xorq 40(%rdi), %r14
xorq 48(%rdi), %r13
xorq 56(%rdi), %r12
xorq 64(%rdi), %r14
xorq 72(%rdi), %r13
xorq 88(%rdi), %rbx
xorq 96(%rdi), %r15
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -16(%rdi), %r13
movq 64(%rdi), %r14
movq -56(%rdi), %r15
movq 24(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -16(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 64(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -56(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 24(%rdi)
movq $0x80000001, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq 32(%rdi), %r12
movq -88(%rdi), %r13
movq -8(%rdi), %r14
movq 72(%rdi), %r15
movq -48(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -8(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -48(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 32(%rdi)
# Row 2
movq -40(%rdi), %r12
movq 40(%rdi), %r13
movq -80(%rdi), %r14
movq (%rdi), %r15
movq 80(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 40(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, (%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 80(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -40(%rdi)
# Row 3
movq 88(%rdi), %r12
movq -32(%rdi), %r13
movq 48(%rdi), %r14
movq -72(%rdi), %r15
movq 8(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 48(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 8(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 88(%rdi)
# Row 4
xorq 16(%rdi), %r9
xorq 96(%rdi), %r10
xorq -24(%rdi), %r11
xorq 56(%rdi), %rax
xorq -64(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 16(%rdi)
movq %r13, 96(%rdi)
movq %r14, -24(%rdi)
movq %r15, 56(%rdi)
movq %rbx, -64(%rdi)
# Round 23
xorq %rcx, %r12
xorq -88(%rdi), %r13
xorq -80(%rdi), %r14
xorq -72(%rdi), %r15
xorq -56(%rdi), %r15
xorq -48(%rdi), %rbx
xorq -40(%rdi), %r12
xorq -32(%rdi), %r13
xorq -16(%rdi), %r13
xorq -8(%rdi), %r14
xorq (%rdi), %r15
xorq 8(%rdi), %rbx
xorq 24(%rdi), %rbx
xorq 32(%rdi), %r12
xorq 40(%rdi), %r13
xorq 48(%rdi), %r14
xorq 64(%rdi), %r14
xorq 72(%rdi), %r15
xorq 80(%rdi), %rbx
xorq 88(%rdi), %r12
# Calc t[0..4]
rorxq $63, %r13, %rax
rorxq $63, %r14, %r8
rorxq $63, %r15, %r9
rorxq $63, %rbx, %r10
rorxq $63, %r12, %r11
xorq %rbx, %rax
xorq %r12, %r8
xorq %r13, %r9
xorq %r14, %r10
xorq %r15, %r11
# Row Mix
# Row 0
movq %rcx, %r12
movq -88(%rdi), %r13
movq -80(%rdi), %r14
movq -72(%rdi), %r15
movq -64(%rdi), %rbx
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
rolq $44, %r13
rolq $43, %r14
rolq $21, %r15
rolq $14, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -88(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -80(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -72(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -64(%rdi)
movq $0x8000000080008008, %rbx
andnq %r14, %r13, %rcx
xorq %r12, %rcx
# XOR in constant
xorq %rbx, %rcx
# Row 1
movq -56(%rdi), %r12
movq -48(%rdi), %r13
movq -40(%rdi), %r14
movq -32(%rdi), %r15
movq -24(%rdi), %rbx
xorq %r10, %r12
xorq %r11, %r13
xorq %rax, %r14
xorq %r8, %r15
xorq %r9, %rbx
rolq $28, %r12
rolq $20, %r13
rolq $3, %r14
rolq $45, %r15
rolq $61, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -48(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, -40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, -32(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, -24(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -56(%rdi)
# Row 2
movq -16(%rdi), %r12
movq -8(%rdi), %r13
movq (%rdi), %r14
movq 8(%rdi), %r15
movq 16(%rdi), %rbx
xorq %r8, %r12
xorq %r9, %r13
xorq %r10, %r14
xorq %r11, %r15
xorq %rax, %rbx
rolq $0x01, %r12
rolq $6, %r13
rolq $25, %r14
rolq $8, %r15
rolq $18, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, -8(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, (%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 8(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 16(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, -16(%rdi)
# Row 3
movq 24(%rdi), %r12
movq 32(%rdi), %r13
movq 40(%rdi), %r14
movq 48(%rdi), %r15
movq 56(%rdi), %rbx
xorq %r11, %r12
xorq %rax, %r13
xorq %r8, %r14
xorq %r9, %r15
xorq %r10, %rbx
rolq $27, %r12
rolq $36, %r13
rolq $10, %r14
rolq $15, %r15
rolq $56, %rbx
andnq %r15, %r14, %rbp
xorq %r13, %rbp
movq %rbp, 32(%rdi)
andnq %rbx, %r15, %rbp
xorq %r14, %rbp
movq %rbp, 40(%rdi)
andnq %r12, %rbx, %rbp
xorq %r15, %rbp
movq %rbp, 48(%rdi)
andnq %r13, %r12, %rbp
xorq %rbx, %rbp
movq %rbp, 56(%rdi)
andnq %r14, %r13, %rbp
xorq %r12, %rbp
movq %rbp, 24(%rdi)
# Row 4
xorq 64(%rdi), %r9
xorq 72(%rdi), %r10
xorq 80(%rdi), %r11
xorq 88(%rdi), %rax
xorq 96(%rdi), %r8
rorxq $2, %r9, %r12
rorxq $9, %r10, %r13
rorxq $25, %r11, %r14
rorxq $23, %rax, %r15
rorxq $62, %r8, %rbx
andnq %r14, %r13, %rax
andnq %r15, %r14, %r8
andnq %rbx, %r15, %r9
andnq %r12, %rbx, %r10
andnq %r13, %r12, %r11
xorq %rax, %r12
xorq %r8, %r13
xorq %r9, %r14
xorq %r10, %r15
xorq %r11, %rbx
movq %r12, 64(%rdi)
movq %r13, 72(%rdi)
movq %r14, 80(%rdi)
movq %r15, 88(%rdi)
movq %rbx, 96(%rdi)
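# End of the 24 rounds: advance the message pointer %rsi by the block
# size saved at (%rsp) by the prologue and loop while the block count
# in %edx is still positive.  On exit, lane [0][0] is finally stored
# from %rcx; the first popq %rbp discards the block-size slot before
# the callee-saved registers are restored.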
addq (%rsp), %rsi
subl $0x01, %edx
movq (%rsp), %rbp
jg L_sha3_block_n_bmi2_start
movq %rcx, -96(%rdi)
popq %rbp
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
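# Two-byte return (rep ret), the usual idiom to avoid a branch
# mispredict penalty on older AMD cores.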
repz retq
#ifndef __APPLE__
.size sha3_block_n_bmi2,.-sha3_block_n_bmi2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
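# Per-lane left-rotate counts for the rho step: six 32-byte groups of
# four quadwords, matching the lane order held in ymm1..ymm6 (these are
# the standard Keccak rho offsets).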
L_sha3_block_avx2_rotl:
.quad 0x1,0x3e
.quad 0x1c,0x1b
.quad 0x2c,0x6
.quad 0x37,0x14
.quad 0xa,0x2b
.quad 0x19,0x27
.quad 0x2d,0xf
.quad 0x15,0x8
.quad 0x24,0x3
.quad 0x29,0x12
.quad 0x2,0x3d
.quad 0x38,0xe
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
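# Matching right-shift counts (64 - n) used with vpsrlvq to build full
# 64-bit rotates, since AVX2 has no per-lane variable rotate.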
L_sha3_block_avx2_rotr:
.quad 0x3f,0x2
.quad 0x24,0x25
.quad 0x14,0x3a
.quad 0x9,0x2c
.quad 0x36,0x15
.quad 0x27,0x19
.quad 0x13,0x31
.quad 0x2b,0x38
.quad 0x1c,0x3d
.quad 0x17,0x2e
.quad 0x3e,0x3
.quad 0x8,0x32
#ifndef __APPLE__
.text
.globl sha3_block_avx2
.type sha3_block_avx2,@function
.align 16
sha3_block_avx2:
#else
.section __TEXT,__text
.globl _sha3_block_avx2
.p2align 4
_sha3_block_avx2:
#endif /* __APPLE__ */
leaq L_sha3_avx2_r(%rip), %rdx
leaq L_sha3_block_avx2_rotl(%rip), %rax
addq $0x40, %rax
leaq L_sha3_block_avx2_rotr(%rip), %rcx
addq $0x40, %rcx
movq $24, %r8
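# State layout: lane [0][0] is broadcast into ymm0 and the remaining 24
# lanes are loaded four-per-register into ymm1..ymm6, then permuted
# (vpermq/vpblendd) into the order the round's row mix expects.  %rdx
# walks the round-constant table (one 32-byte entry per round) and %r8
# counts the 24 rounds.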
vpbroadcastq (%rdi), %ymm0
vmovdqu 8(%rdi), %ymm1
vmovdqu 40(%rdi), %ymm2
vmovdqu 72(%rdi), %ymm3
vmovdqu 104(%rdi), %ymm4
vmovdqu 136(%rdi), %ymm5
vmovdqu 168(%rdi), %ymm6
vpermq $57, %ymm2, %ymm7
vpermq $30, %ymm3, %ymm8
vpermq $0x4b, %ymm4, %ymm9
vpermq $0x93, %ymm5, %ymm10
vpblendd $12, %ymm3, %ymm2, %ymm11
vpblendd $0xc0, %ymm5, %ymm4, %ymm12
vpblendd $0xc0, %ymm8, %ymm7, %ymm2
vpblendd $0xf0, %ymm9, %ymm8, %ymm3
vpblendd $3, %ymm9, %ymm10, %ymm4
vpblendd $0xf0, %ymm12, %ymm11, %ymm5
L_sha3_block_avx2_start:
# Calc b[0..4]
vpshufd $0xee, %ymm5, %ymm7
vpxor %ymm7, %ymm5, %ymm14
vpxor %ymm2, %ymm1, %ymm15
vpermq $0xaa, %ymm14, %ymm7
vpxor %ymm0, %ymm14, %ymm14
vpxor %ymm4, %ymm3, %ymm12
vpxor %ymm7, %ymm14, %ymm14
vpermq $0x00, %ymm14, %ymm14
vpxor %ymm6, %ymm15, %ymm15
vpxor %ymm12, %ymm15, %ymm15
# XOR in b[x+4]
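# b[x+4] is b[x-1] mod 5: theta XORs in the left-neighbour column parity.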
vpermq $0x93, %ymm15, %ymm7
vpermq $57, %ymm15, %ymm9
vpermq $0xff, %ymm15, %ymm8
vpermq $0x00, %ymm15, %ymm10
vpblendd $3, %ymm14, %ymm7, %ymm7
vpblendd $0xc0, %ymm14, %ymm9, %ymm9
vpxor %ymm8, %ymm0, %ymm0
vpxor %ymm7, %ymm1, %ymm1
vpxor %ymm7, %ymm2, %ymm2
vpxor %ymm7, %ymm3, %ymm3
vpxor %ymm7, %ymm4, %ymm4
vpxor %ymm8, %ymm5, %ymm5
vpxor %ymm7, %ymm6, %ymm6
# Rotate left 1
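# ROTL64(x, 1) synthesized as (x >> 63) | (x + x).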
vpsrlq $63, %ymm9, %ymm7
vpsrlq $63, %ymm10, %ymm8
vpaddq %ymm9, %ymm9, %ymm9
vpaddq %ymm10, %ymm10, %ymm10
vpor %ymm7, %ymm9, %ymm9
vpor %ymm8, %ymm10, %ymm10
# XOR in ROTL64(b[x+1])
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm9, %ymm1, %ymm1
vpxor %ymm9, %ymm2, %ymm2
vpxor %ymm9, %ymm3, %ymm3
vpxor %ymm9, %ymm4, %ymm4
vpxor %ymm10, %ymm5, %ymm5
vpxor %ymm9, %ymm6, %ymm6
# Shuffle - Rotate
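# Variable rho rotates: pair a per-lane right shift (rotr table) with a
# per-lane left shift (rotl table) and OR the halves.  %rax/%rcx point
# 0x40 into the tables, hence the negative displacements.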
vpsrlvq -64(%rcx), %ymm1, %ymm8
vpsrlvq -32(%rcx), %ymm2, %ymm9
vpsrlvq (%rcx), %ymm3, %ymm10
vpsrlvq 32(%rcx), %ymm4, %ymm11
vpsrlvq 64(%rcx), %ymm5, %ymm12
vpsrlvq 96(%rcx), %ymm6, %ymm13
vpsllvq -64(%rax), %ymm1, %ymm1
vpsllvq -32(%rax), %ymm2, %ymm2
vpsllvq (%rax), %ymm3, %ymm3
vpsllvq 32(%rax), %ymm4, %ymm4
vpsllvq 64(%rax), %ymm5, %ymm5
vpsllvq 96(%rax), %ymm6, %ymm6
vpor %ymm8, %ymm1, %ymm1
vpor %ymm9, %ymm2, %ymm2
vpor %ymm10, %ymm3, %ymm3
vpor %ymm11, %ymm4, %ymm4
vpor %ymm12, %ymm5, %ymm5
vpor %ymm13, %ymm6, %ymm6
# Row Mix
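# chi per row via vpandn (dst = ~src1 & src2), followed by
# vpermq/vpblendd shuffles that restore the lane layout the next round
# expects.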
vpermq $0x00, %ymm2, %ymm12
vpermq $0x55, %ymm3, %ymm13
vpermq $0xaa, %ymm4, %ymm14
vpermq $0xff, %ymm6, %ymm15
vpandn %ymm14, %ymm13, %ymm7
vpandn %ymm15, %ymm14, %ymm8
vpandn %ymm0, %ymm15, %ymm9
vpandn %ymm12, %ymm0, %ymm10
vpandn %ymm13, %ymm12, %ymm11
vpxor %ymm7, %ymm12, %ymm12
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm0, %ymm0
vpermq $0x8d, %ymm5, %ymm7
vpblendd $12, %ymm13, %ymm12, %ymm10
vpermq $0x72, %ymm1, %ymm11
vpblendd $0xc0, %ymm15, %ymm14, %ymm9
vpermq $0x87, %ymm2, %ymm12
vpblendd $0xf0, %ymm9, %ymm10, %ymm1
vpermq $0xc9, %ymm3, %ymm13
vpermq $0x9c, %ymm4, %ymm14
vpermq $45, %ymm6, %ymm15
vpblendd $48, %ymm7, %ymm12, %ymm12
vpblendd $3, %ymm7, %ymm13, %ymm13
vpblendd $0xc0, %ymm7, %ymm14, %ymm14
vpblendd $12, %ymm7, %ymm15, %ymm15
vpandn %ymm13, %ymm12, %ymm5
vpandn %ymm14, %ymm13, %ymm7
vpandn %ymm15, %ymm14, %ymm2
vpandn %ymm11, %ymm15, %ymm3
vpandn %ymm12, %ymm11, %ymm4
vpxor %ymm5, %ymm11, %ymm5
vpxor %ymm7, %ymm12, %ymm12
vpxor %ymm2, %ymm13, %ymm13
vpxor %ymm3, %ymm14, %ymm14
vpxor %ymm4, %ymm15, %ymm15
vpunpcklqdq %ymm13, %ymm12, %ymm2
vpunpckhqdq %ymm13, %ymm12, %ymm3
vpunpcklqdq %ymm15, %ymm14, %ymm7
vpunpckhqdq %ymm15, %ymm14, %ymm8
vperm2i128 $49, %ymm7, %ymm2, %ymm4
vperm2i128 $49, %ymm8, %ymm3, %ymm6
vperm2i128 $32, %ymm7, %ymm2, %ymm2
vperm2i128 $32, %ymm8, %ymm3, %ymm3
vpxor (%rdx), %ymm0, %ymm0
addq $32, %rdx
subq $0x01, %r8
jnz L_sha3_block_avx2_start
vpermq $0x93, %ymm2, %ymm7
vpermq $0x4e, %ymm3, %ymm8
vpermq $57, %ymm4, %ymm9
vpblendd $3, %ymm5, %ymm7, %ymm2
vpblendd $3, %ymm7, %ymm8, %ymm3
vpblendd $12, %ymm5, %ymm3, %ymm3
vpblendd $0xc0, %ymm9, %ymm8, %ymm4
vpblendd $48, %ymm5, %ymm4, %ymm4
vpblendd $0xc0, %ymm5, %ymm9, %ymm5
vmovq %xmm0, (%rdi)
vmovdqu %ymm1, 8(%rdi)
vmovdqu %ymm2, 40(%rdi)
vmovdqu %ymm3, 72(%rdi)
vmovdqu %ymm4, 104(%rdi)
vmovdqu %ymm5, 136(%rdi)
vmovdqu %ymm6, 168(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size sha3_block_avx2,.-sha3_block_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl kyber_sha3_blocksx4_avx2
.type kyber_sha3_blocksx4_avx2,@function
.align 16
kyber_sha3_blocksx4_avx2:
#else
.section __TEXT,__text
.globl _kyber_sha3_blocksx4_avx2
.p2align 4
_kyber_sha3_blocksx4_avx2:
#endif /* __APPLE__ */
leaq L_sha3_x4_avx2_r(%rip), %rdx
vmovdqu (%rdi), %ymm15
movq %rdi, %rax
movq %rdi, %rcx
addq $0x80, %rdi
addq $0x180, %rax
addq $0x280, %rcx
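# Four SHA-3 states are processed in parallel: each 32-byte ymm load
# holds the same lane from all four interleaved states.  The staggered
# base pointers (%rdi, %rax, %rcx at +0x80, +0x180, +0x280) keep all
# 25 interleaved lanes reachable with short displacements.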
# Round 0
# Calc b[0..4]
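# Column parities b[0..4] are accumulated across all four states at once.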
vmovdqu -96(%rdi), %ymm11
vmovdqu -64(%rdi), %ymm12
vmovdqu -32(%rdi), %ymm13
vmovdqu (%rdi), %ymm14
vpxor 32(%rdi), %ymm15, %ymm10
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rdi), %ymm6, %ymm11
vpxor (%rax), %ymm7, %ymm12
vpxor -64(%rcx), %ymm8, %ymm13
vpxor 128(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
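# Round constant from L_sha3_x4_avx2_r, pre-broadcast so the same
# constant lands in all four states (32 bytes per round).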
vpxor (%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rcx)
# Row 1
vpxor -32(%rdi), %ymm8, %ymm10
vpxor -96(%rax), %ymm9, %ymm11
vpxor -64(%rax), %ymm5, %ymm12
vpxor 128(%rax), %ymm6, %ymm13
vpxor 64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 2
vpxor -96(%rdi), %ymm6, %ymm10
vpxor 96(%rdi), %ymm7, %ymm11
vpxor 32(%rax), %ymm8, %ymm12
vpxor -32(%rcx), %ymm9, %ymm13
vpxor (%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rcx)
# Row 3
vpxor (%rdi), %ymm9, %ymm10
vpxor 32(%rdi), %ymm5, %ymm11
vpxor -32(%rax), %ymm6, %ymm12
vpxor -96(%rcx), %ymm7, %ymm13
vpxor 96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 4
vpxor -64(%rdi), %ymm7, %ymm10
vpxor 128(%rdi), %ymm8, %ymm11
vpxor 64(%rax), %ymm9, %ymm12
vpxor 96(%rax), %ymm5, %ymm13
vpxor 32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 32(%rcx)
# Round 1
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm2, %ymm12
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm3, %ymm13
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm4, %ymm14
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rax), %ymm6, %ymm11
vpxor 32(%rax), %ymm7, %ymm12
vpxor -96(%rcx), %ymm8, %ymm13
vpxor 32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 32(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 1
vpxor -64(%rcx), %ymm8, %ymm10
vpxor 64(%rcx), %ymm9, %ymm11
vpxor -96(%rdi), %ymm5, %ymm12
vpxor 32(%rdi), %ymm6, %ymm13
vpxor 64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 64(%rax)
# Row 2
vpxor 64(%rdi), %ymm6, %ymm10
vpxor -64(%rax), %ymm7, %ymm11
vpxor -32(%rcx), %ymm8, %ymm12
vpxor 96(%rcx), %ymm9, %ymm13
vpxor -64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 3
vpxor 128(%rcx), %ymm9, %ymm10
vpxor -32(%rdi), %ymm5, %ymm11
vpxor 96(%rdi), %ymm6, %ymm12
vpxor -32(%rax), %ymm7, %ymm13
vpxor 96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 96(%rax)
# Row 4
vpxor (%rax), %ymm7, %ymm10
vpxor 128(%rax), %ymm8, %ymm11
vpxor (%rcx), %ymm9, %ymm12
vpxor (%rdi), %ymm5, %ymm13
vpxor 128(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 128(%rdi)
# Round 2
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rcx), %ymm6, %ymm11
vpxor -32(%rcx), %ymm7, %ymm12
vpxor -32(%rax), %ymm8, %ymm13
vpxor 128(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 64(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 1
vpxor -96(%rcx), %ymm8, %ymm10
vpxor 64(%rax), %ymm9, %ymm11
vpxor 64(%rdi), %ymm5, %ymm12
vpxor -32(%rdi), %ymm6, %ymm13
vpxor (%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 2
vpxor -96(%rax), %ymm6, %ymm10
vpxor -96(%rdi), %ymm7, %ymm11
vpxor 96(%rcx), %ymm8, %ymm12
vpxor 96(%rax), %ymm9, %ymm13
vpxor (%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
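# Rho offset 1 for the first lane of this row: the left shift by 1 is
# done as vpaddq (x + x), merged with the $63 right shift above.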
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rax)
# Row 3
vpxor 32(%rcx), %ymm9, %ymm10
vpxor -64(%rcx), %ymm5, %ymm11
vpxor -64(%rax), %ymm6, %ymm12
vpxor 96(%rdi), %ymm7, %ymm13
vpxor (%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 4
vpxor 32(%rax), %ymm7, %ymm10
vpxor 32(%rdi), %ymm8, %ymm11
vpxor -64(%rdi), %ymm9, %ymm12
vpxor 128(%rcx), %ymm5, %ymm13
vpxor 128(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 128(%rax)
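# Rounds 3 onward repeat the exact theta / rho+pi+chi / iota sequence
# above; only the memory offsets and the round-constant index change.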
# Round 3
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm2, %ymm12
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rax), %ymm6, %ymm11
vpxor 96(%rcx), %ymm7, %ymm12
vpxor 96(%rdi), %ymm8, %ymm13
vpxor 128(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 96(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 128(%rax)
# Row 1
vpxor -32(%rax), %ymm8, %ymm10
vpxor (%rcx), %ymm9, %ymm11
vpxor -96(%rax), %ymm5, %ymm12
vpxor -64(%rcx), %ymm6, %ymm13
vpxor -64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 2
vpxor 64(%rcx), %ymm6, %ymm10
vpxor 64(%rdi), %ymm7, %ymm11
vpxor 96(%rax), %ymm8, %ymm12
vpxor (%rdi), %ymm9, %ymm13
vpxor 32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 3
vpxor 128(%rdi), %ymm9, %ymm10
vpxor -96(%rcx), %ymm5, %ymm11
vpxor -96(%rdi), %ymm6, %ymm12
vpxor -64(%rax), %ymm7, %ymm13
vpxor 128(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 4
vpxor -32(%rcx), %ymm7, %ymm10
vpxor -32(%rdi), %ymm8, %ymm11
vpxor (%rax), %ymm9, %ymm12
vpxor 32(%rcx), %ymm5, %ymm13
vpxor 32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Round 4
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm1, %ymm11
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm11, %ymm11
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rcx), %ymm6, %ymm11
vpxor 96(%rax), %ymm7, %ymm12
vpxor -64(%rax), %ymm8, %ymm13
vpxor 32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 128(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 1
vpxor 96(%rdi), %ymm8, %ymm10
vpxor -64(%rdi), %ymm9, %ymm11
vpxor 64(%rcx), %ymm5, %ymm12
vpxor -96(%rcx), %ymm6, %ymm13
vpxor (%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, (%rax)
# Row 2
vpxor 64(%rax), %ymm6, %ymm10
vpxor -96(%rax), %ymm7, %ymm11
vpxor (%rdi), %ymm8, %ymm12
vpxor 128(%rcx), %ymm9, %ymm13
vpxor -32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 3
vpxor 128(%rax), %ymm9, %ymm10
vpxor -32(%rax), %ymm5, %ymm11
vpxor 64(%rdi), %ymm6, %ymm12
vpxor -96(%rdi), %ymm7, %ymm13
vpxor 32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 4
vpxor 96(%rcx), %ymm7, %ymm10
vpxor -64(%rcx), %ymm8, %ymm11
vpxor 32(%rax), %ymm9, %ymm12
vpxor 128(%rdi), %ymm5, %ymm13
vpxor -32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Round 5
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rdi), %ymm6, %ymm11
vpxor (%rdi), %ymm7, %ymm12
vpxor -96(%rdi), %ymm8, %ymm13
vpxor -32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 160(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 1
vpxor -64(%rax), %ymm8, %ymm10
vpxor (%rax), %ymm9, %ymm11
vpxor 64(%rax), %ymm5, %ymm12
vpxor -32(%rax), %ymm6, %ymm13
vpxor 32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rax)
# Row 2
vpxor (%rcx), %ymm6, %ymm10
vpxor 64(%rcx), %ymm7, %ymm11
vpxor 128(%rcx), %ymm8, %ymm12
vpxor 32(%rcx), %ymm9, %ymm13
vpxor 96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 3
vpxor 32(%rdi), %ymm9, %ymm10
vpxor 96(%rdi), %ymm5, %ymm11
vpxor -96(%rax), %ymm6, %ymm12
vpxor 64(%rdi), %ymm7, %ymm13
vpxor 128(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 128(%rdi)
# Row 4
vpxor 96(%rax), %ymm7, %ymm10
vpxor -96(%rcx), %ymm8, %ymm11
vpxor -32(%rcx), %ymm9, %ymm12
vpxor 128(%rax), %ymm5, %ymm13
vpxor -64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rcx)
# Round 6
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rax), %ymm6, %ymm11
vpxor 128(%rcx), %ymm7, %ymm12
vpxor 64(%rdi), %ymm8, %ymm13
vpxor -64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 192(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -64(%rcx)
# Row 1
vpxor -96(%rdi), %ymm8, %ymm10
vpxor 32(%rax), %ymm9, %ymm11
vpxor (%rcx), %ymm5, %ymm12
vpxor 96(%rdi), %ymm6, %ymm13
vpxor -32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 2
vpxor -64(%rdi), %ymm6, %ymm10
vpxor 64(%rax), %ymm7, %ymm11
vpxor 32(%rcx), %ymm8, %ymm12
vpxor 128(%rdi), %ymm9, %ymm13
vpxor 96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 3
vpxor -32(%rdi), %ymm9, %ymm10
vpxor -64(%rax), %ymm5, %ymm11
vpxor 64(%rcx), %ymm6, %ymm12
vpxor -96(%rax), %ymm7, %ymm13
vpxor 128(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rax)
# Row 4
vpxor (%rdi), %ymm7, %ymm10
vpxor -32(%rax), %ymm8, %ymm11
vpxor 96(%rcx), %ymm9, %ymm12
vpxor 32(%rdi), %ymm5, %ymm13
vpxor -96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Round 7
# Calc b[0..4]
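# Same column parities as in earlier rounds; the XOR chain is simply
# scheduled column-by-column here instead of interleaved.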
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm3, %ymm13
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm1, %ymm11
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm4, %ymm14
vpxor 128(%rax), %ymm14, %ymm14
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm2, %ymm12
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rax), %ymm6, %ymm11
vpxor 32(%rcx), %ymm7, %ymm12
vpxor -96(%rax), %ymm8, %ymm13
vpxor -96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 224(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, -96(%rcx)
# Row 1
vpxor 64(%rdi), %ymm8, %ymm10
vpxor -32(%rcx), %ymm9, %ymm11
vpxor -64(%rdi), %ymm5, %ymm12
vpxor -64(%rax), %ymm6, %ymm13
vpxor 96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 2
vpxor (%rax), %ymm6, %ymm10
vpxor (%rcx), %ymm7, %ymm11
vpxor 128(%rdi), %ymm8, %ymm12
vpxor 128(%rax), %ymm9, %ymm13
vpxor (%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, (%rdi)
# Row 3
vpxor -64(%rcx), %ymm9, %ymm10
vpxor -96(%rdi), %ymm5, %ymm11
vpxor 64(%rax), %ymm6, %ymm12
vpxor 64(%rcx), %ymm7, %ymm13
vpxor 32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Row 4
vpxor 128(%rcx), %ymm7, %ymm10
vpxor 96(%rdi), %ymm8, %ymm11
vpxor 96(%rax), %ymm9, %ymm12
vpxor -32(%rdi), %ymm5, %ymm13
vpxor -32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, -32(%rax)
# Round 8
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm3, %ymm13
vpxor -64(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rcx), %ymm6, %ymm11
vpxor 128(%rdi), %ymm7, %ymm12
vpxor 64(%rcx), %ymm8, %ymm13
vpxor -32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 256(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 1
vpxor -96(%rax), %ymm8, %ymm10
vpxor 96(%rcx), %ymm9, %ymm11
vpxor (%rax), %ymm5, %ymm12
vpxor -96(%rdi), %ymm6, %ymm13
vpxor 96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 2
vpxor 32(%rax), %ymm6, %ymm10
vpxor -64(%rdi), %ymm7, %ymm11
vpxor 128(%rax), %ymm8, %ymm12
vpxor 32(%rdi), %ymm9, %ymm13
vpxor 128(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 128(%rcx)
# Row 3
vpxor -96(%rcx), %ymm9, %ymm10
vpxor 64(%rdi), %ymm5, %ymm11
vpxor (%rcx), %ymm6, %ymm12
vpxor 64(%rax), %ymm7, %ymm13
vpxor -32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 4
vpxor 32(%rcx), %ymm7, %ymm10
vpxor -64(%rax), %ymm8, %ymm11
vpxor (%rdi), %ymm9, %ymm12
vpxor -64(%rcx), %ymm5, %ymm13
vpxor 96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 96(%rdi)
# Round 9
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm2, %ymm12
vpxor -96(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rcx), %ymm6, %ymm11
vpxor 128(%rax), %ymm7, %ymm12
vpxor 64(%rax), %ymm8, %ymm13
vpxor 96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 288(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 1
vpxor 64(%rcx), %ymm8, %ymm10
vpxor 96(%rax), %ymm9, %ymm11
vpxor 32(%rax), %ymm5, %ymm12
vpxor 64(%rdi), %ymm6, %ymm13
vpxor (%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 2
vpxor -32(%rcx), %ymm6, %ymm10
vpxor (%rax), %ymm7, %ymm11
vpxor 32(%rdi), %ymm8, %ymm12
vpxor -32(%rdi), %ymm9, %ymm13
vpxor 32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 3
vpxor -32(%rax), %ymm9, %ymm10
vpxor -96(%rax), %ymm5, %ymm11
vpxor -64(%rdi), %ymm6, %ymm12
vpxor (%rcx), %ymm7, %ymm13
vpxor -64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rcx)
# Row 4
vpxor 128(%rdi), %ymm7, %ymm10
vpxor -96(%rdi), %ymm8, %ymm11
vpxor 128(%rcx), %ymm9, %ymm12
vpxor -96(%rcx), %ymm5, %ymm13
vpxor -64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, -64(%rax)
# Round 10
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm1, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm12, %ymm12
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rax), %ymm6, %ymm11
vpxor 32(%rdi), %ymm7, %ymm12
vpxor (%rcx), %ymm8, %ymm13
vpxor -64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 320(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 1
vpxor 64(%rax), %ymm8, %ymm10
vpxor (%rdi), %ymm9, %ymm11
vpxor -32(%rcx), %ymm5, %ymm12
vpxor -96(%rax), %ymm6, %ymm13
vpxor 128(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 2
vpxor 96(%rcx), %ymm6, %ymm10
vpxor 32(%rax), %ymm7, %ymm11
vpxor -32(%rdi), %ymm8, %ymm12
vpxor -64(%rcx), %ymm9, %ymm13
vpxor 128(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rdi)
# Row 3
vpxor 96(%rdi), %ymm9, %ymm10
vpxor 64(%rcx), %ymm5, %ymm11
vpxor (%rax), %ymm6, %ymm12
vpxor -64(%rdi), %ymm7, %ymm13
vpxor -96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 4
vpxor 128(%rax), %ymm7, %ymm10
vpxor 64(%rdi), %ymm8, %ymm11
vpxor 32(%rcx), %ymm9, %ymm12
vpxor -32(%rax), %ymm5, %ymm13
vpxor -96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, -96(%rdi)
# Round 11
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm4, %ymm14
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rdi), %ymm6, %ymm11
vpxor -32(%rdi), %ymm7, %ymm12
vpxor -64(%rdi), %ymm8, %ymm13
vpxor -96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 352(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rdi)
# Row 1
vpxor (%rcx), %ymm8, %ymm10
vpxor 128(%rcx), %ymm9, %ymm11
vpxor 96(%rcx), %ymm5, %ymm12
vpxor 64(%rcx), %ymm6, %ymm13
vpxor 32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 2
vpxor 96(%rax), %ymm6, %ymm10
vpxor -32(%rcx), %ymm7, %ymm11
vpxor -64(%rcx), %ymm8, %ymm12
vpxor -96(%rcx), %ymm9, %ymm13
vpxor 128(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 3
vpxor -64(%rax), %ymm9, %ymm10
vpxor 64(%rax), %ymm5, %ymm11
vpxor 32(%rax), %ymm6, %ymm12
vpxor (%rax), %ymm7, %ymm13
vpxor -32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rax)
# Row 4
vpxor 32(%rdi), %ymm7, %ymm10
vpxor -96(%rax), %ymm8, %ymm11
vpxor 128(%rdi), %ymm9, %ymm12
vpxor 96(%rdi), %ymm5, %ymm13
vpxor 64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 64(%rdi)
# Round 12
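        # Each round below is one Keccak-f[1600] permutation round applied
        # to four interleaved states: every ymm register holds the same
        # 64-bit lane of four parallel computations, so all shifts and
        # rotates act per 64-bit element. Lane [0][0] stays resident in
        # %ymm15; the other 24 lanes live in memory, 32 bytes apiece,
        # addressed from the %rdi, %rax and %rcx base registers.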
# Calc b[0..4]
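        # Theta, step 1: b[x] is the XOR (parity) of the five lanes in
        # column x. The chi outputs of the previous round are still live
        # in %ymm0..%ymm4 and %ymm15, so they seed the parities and save
        # five reloads.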
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
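        # Theta, step 2: t[x] = ROL64(b[x+1], 1) ^ b[x-1], landing in
        # %ymm5..%ymm9. The 1-bit rotate is assembled from vpsrlq $63 and
        # vpaddq (x + x == x << 1) followed by vpor.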
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
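        # Rho + pi + chi, one output row at a time: each row step XORs the
        # theta term t[x] (%ymm5..%ymm9) into five input lanes, rotates
        # each lane by its rho offset, then combines the row with chi. The
        # pi lane permutation is folded into the load/store address
        # schedule, which is why the offsets change from round to round.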
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rcx), %ymm6, %ymm11
vpxor -64(%rcx), %ymm7, %ymm12
vpxor (%rax), %ymm8, %ymm13
vpxor 64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
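        # Iota: fold the round constant into lane [0][0]. The table at
        # (%rdx) holds 32 bytes per round (offset 12 * 32 = 384 here), the
        # 64-bit constant evidently broadcast across the four parallel
        # lanes; the result stays in %ymm15 rather than being stored.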
vpxor 384(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 1
vpxor -64(%rdi), %ymm8, %ymm10
vpxor 32(%rcx), %ymm9, %ymm11
vpxor 96(%rax), %ymm5, %ymm12
vpxor 64(%rax), %ymm6, %ymm13
vpxor 128(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 2
vpxor (%rdi), %ymm6, %ymm10
vpxor 96(%rcx), %ymm7, %ymm11
vpxor -96(%rcx), %ymm8, %ymm12
vpxor -32(%rax), %ymm9, %ymm13
vpxor 32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 3
vpxor -96(%rdi), %ymm9, %ymm10
vpxor (%rcx), %ymm5, %ymm11
vpxor -32(%rcx), %ymm6, %ymm12
vpxor 32(%rax), %ymm7, %ymm13
vpxor 96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 4
vpxor -32(%rdi), %ymm7, %ymm10
vpxor 64(%rcx), %ymm8, %ymm11
vpxor 128(%rax), %ymm9, %ymm12
vpxor -64(%rax), %ymm5, %ymm13
vpxor -96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -96(%rax)
# Round 13
# Calc b[0..4]
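        # Same column parities as in the previous round; only the XOR
        # accumulation order differs, since each round starts from
        # whichever chi outputs are still held in registers.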
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -32(%rax), %ymm3, %ymm13
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm2, %ymm12
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm1, %ymm11
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rcx), %ymm6, %ymm11
vpxor -96(%rcx), %ymm7, %ymm12
vpxor 32(%rax), %ymm8, %ymm13
vpxor -96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 416(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -96(%rax)
# Row 1
vpxor (%rax), %ymm8, %ymm10
vpxor 128(%rdi), %ymm9, %ymm11
vpxor (%rdi), %ymm5, %ymm12
vpxor (%rcx), %ymm6, %ymm13
vpxor 128(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 2
vpxor 128(%rcx), %ymm6, %ymm10
vpxor 96(%rax), %ymm7, %ymm11
vpxor -32(%rax), %ymm8, %ymm12
vpxor 96(%rdi), %ymm9, %ymm13
vpxor -32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 3
vpxor 64(%rdi), %ymm9, %ymm10
vpxor -64(%rdi), %ymm5, %ymm11
vpxor 96(%rcx), %ymm6, %ymm12
vpxor -32(%rcx), %ymm7, %ymm13
vpxor -64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 4
vpxor -64(%rcx), %ymm7, %ymm10
vpxor 64(%rax), %ymm8, %ymm11
vpxor 32(%rdi), %ymm9, %ymm12
vpxor -96(%rdi), %ymm5, %ymm13
vpxor 64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 64(%rcx)
# Round 14
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm3, %ymm13
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rdi), %ymm6, %ymm11
vpxor -32(%rax), %ymm7, %ymm12
vpxor -32(%rcx), %ymm8, %ymm13
vpxor 64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 448(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 1
vpxor 32(%rax), %ymm8, %ymm10
vpxor 128(%rax), %ymm9, %ymm11
vpxor 128(%rcx), %ymm5, %ymm12
vpxor -64(%rdi), %ymm6, %ymm13
vpxor 32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, 32(%rdi)
# Row 2
vpxor 32(%rcx), %ymm6, %ymm10
vpxor (%rdi), %ymm7, %ymm11
vpxor 96(%rdi), %ymm8, %ymm12
vpxor -64(%rax), %ymm9, %ymm13
vpxor -64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 3
vpxor -96(%rax), %ymm9, %ymm10
vpxor (%rax), %ymm5, %ymm11
vpxor 96(%rax), %ymm6, %ymm12
vpxor 96(%rcx), %ymm7, %ymm13
vpxor -96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 4
vpxor -96(%rcx), %ymm7, %ymm10
vpxor (%rcx), %ymm8, %ymm11
vpxor -32(%rdi), %ymm9, %ymm12
vpxor 64(%rdi), %ymm5, %ymm13
vpxor 64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 64(%rax)
# Round 15
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm2, %ymm12
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm11, %ymm11
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rax), %ymm6, %ymm11
vpxor 96(%rdi), %ymm7, %ymm12
vpxor 96(%rcx), %ymm8, %ymm13
vpxor 64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 480(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 1
vpxor -32(%rcx), %ymm8, %ymm10
vpxor 32(%rdi), %ymm9, %ymm11
vpxor 32(%rcx), %ymm5, %ymm12
vpxor (%rax), %ymm6, %ymm13
vpxor -32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 2
vpxor 128(%rdi), %ymm6, %ymm10
vpxor 128(%rcx), %ymm7, %ymm11
vpxor -64(%rax), %ymm8, %ymm12
vpxor -96(%rdi), %ymm9, %ymm13
vpxor -96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 3
vpxor 64(%rcx), %ymm9, %ymm10
vpxor 32(%rax), %ymm5, %ymm11
vpxor (%rdi), %ymm6, %ymm12
vpxor 96(%rax), %ymm7, %ymm13
vpxor 64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 4
vpxor -32(%rax), %ymm7, %ymm10
vpxor -64(%rdi), %ymm8, %ymm11
vpxor -64(%rcx), %ymm9, %ymm12
vpxor -96(%rax), %ymm5, %ymm13
vpxor (%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, (%rcx)
# Round 16
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rdi), %ymm6, %ymm11
vpxor -64(%rax), %ymm7, %ymm12
vpxor 96(%rax), %ymm8, %ymm13
vpxor (%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 512(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rcx)
# Row 1
vpxor 96(%rcx), %ymm8, %ymm10
vpxor -32(%rdi), %ymm9, %ymm11
vpxor 128(%rdi), %ymm5, %ymm12
vpxor 32(%rax), %ymm6, %ymm13
vpxor -64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 2
vpxor 128(%rax), %ymm6, %ymm10
vpxor 32(%rcx), %ymm7, %ymm11
vpxor -96(%rdi), %ymm8, %ymm12
vpxor 64(%rdi), %ymm9, %ymm13
vpxor -32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -32(%rax)
# Row 3
vpxor 64(%rax), %ymm9, %ymm10
vpxor -32(%rcx), %ymm5, %ymm11
vpxor 128(%rcx), %ymm6, %ymm12
vpxor (%rdi), %ymm7, %ymm13
vpxor -96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 4
vpxor 96(%rdi), %ymm7, %ymm10
vpxor (%rax), %ymm8, %ymm11
vpxor -96(%rcx), %ymm9, %ymm12
vpxor 64(%rcx), %ymm5, %ymm13
vpxor -64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Round 17
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm4, %ymm14
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm14, %ymm14
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm10, %ymm10
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rdi), %ymm6, %ymm11
vpxor -96(%rdi), %ymm7, %ymm12
vpxor (%rdi), %ymm8, %ymm13
vpxor -64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 544(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rdi)
# Row 1
vpxor 96(%rax), %ymm8, %ymm10
vpxor -64(%rcx), %ymm9, %ymm11
vpxor 128(%rax), %ymm5, %ymm12
vpxor -32(%rcx), %ymm6, %ymm13
vpxor -96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -96(%rcx)
# Row 2
vpxor 32(%rdi), %ymm6, %ymm10
vpxor 128(%rdi), %ymm7, %ymm11
vpxor 64(%rdi), %ymm8, %ymm12
vpxor -96(%rax), %ymm9, %ymm13
vpxor 96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 3
vpxor (%rcx), %ymm9, %ymm10
vpxor 96(%rcx), %ymm5, %ymm11
vpxor 32(%rcx), %ymm6, %ymm12
vpxor 128(%rcx), %ymm7, %ymm13
vpxor 64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 4
vpxor -64(%rax), %ymm7, %ymm10
vpxor 32(%rax), %ymm8, %ymm11
vpxor -32(%rax), %ymm9, %ymm12
vpxor 64(%rax), %ymm5, %ymm13
vpxor (%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, (%rax)
# Round 18
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rcx), %ymm6, %ymm11
vpxor 64(%rdi), %ymm7, %ymm12
vpxor 128(%rcx), %ymm8, %ymm13
vpxor (%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 576(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, (%rax)
# Row 1
vpxor (%rdi), %ymm8, %ymm10
vpxor -96(%rcx), %ymm9, %ymm11
vpxor 32(%rdi), %ymm5, %ymm12
vpxor 96(%rcx), %ymm6, %ymm13
vpxor -32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 2
vpxor -32(%rdi), %ymm6, %ymm10
vpxor 128(%rax), %ymm7, %ymm11
vpxor -96(%rax), %ymm8, %ymm12
vpxor 64(%rcx), %ymm9, %ymm13
vpxor -64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 3
vpxor -64(%rdi), %ymm9, %ymm10
vpxor 96(%rax), %ymm5, %ymm11
vpxor 128(%rdi), %ymm6, %ymm12
vpxor 32(%rcx), %ymm7, %ymm13
vpxor 64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 4
vpxor -96(%rdi), %ymm7, %ymm10
vpxor -32(%rcx), %ymm8, %ymm11
vpxor 96(%rdi), %ymm9, %ymm12
vpxor (%rcx), %ymm5, %ymm13
vpxor 32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 32(%rax)
# Round 19
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm4, %ymm14
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm1, %ymm11
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm3, %ymm13
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rcx), %ymm6, %ymm11
vpxor -96(%rax), %ymm7, %ymm12
vpxor 32(%rcx), %ymm8, %ymm13
vpxor 32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 608(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rax)
# Row 1
vpxor 128(%rcx), %ymm8, %ymm10
vpxor -32(%rax), %ymm9, %ymm11
vpxor -32(%rdi), %ymm5, %ymm12
vpxor 96(%rax), %ymm6, %ymm13
vpxor 96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 2
vpxor -64(%rcx), %ymm6, %ymm10
vpxor 32(%rdi), %ymm7, %ymm11
vpxor 64(%rcx), %ymm8, %ymm12
vpxor 64(%rax), %ymm9, %ymm13
vpxor -96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -96(%rdi)
# Row 3
vpxor (%rax), %ymm9, %ymm10
vpxor (%rdi), %ymm5, %ymm11
vpxor 128(%rax), %ymm6, %ymm12
vpxor 128(%rdi), %ymm7, %ymm13
vpxor (%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 4
vpxor 64(%rdi), %ymm7, %ymm10
vpxor 96(%rcx), %ymm8, %ymm11
vpxor -64(%rax), %ymm9, %ymm12
vpxor -64(%rdi), %ymm5, %ymm13
vpxor -32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Round 20
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm3, %ymm13
vpxor -96(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rax), %ymm6, %ymm11
vpxor 64(%rcx), %ymm7, %ymm12
vpxor 128(%rdi), %ymm8, %ymm13
vpxor -32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 640(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 1
vpxor 32(%rcx), %ymm8, %ymm10
vpxor 96(%rdi), %ymm9, %ymm11
vpxor -64(%rcx), %ymm5, %ymm12
vpxor (%rdi), %ymm6, %ymm13
vpxor -64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rax)
# Row 2
vpxor -96(%rcx), %ymm6, %ymm10
vpxor -32(%rdi), %ymm7, %ymm11
vpxor 64(%rax), %ymm8, %ymm12
vpxor (%rcx), %ymm9, %ymm13
vpxor 64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 3
vpxor 32(%rax), %ymm9, %ymm10
vpxor 128(%rcx), %ymm5, %ymm11
vpxor 32(%rdi), %ymm6, %ymm12
vpxor 128(%rax), %ymm7, %ymm13
vpxor -64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rdi)
# Row 4
vpxor -96(%rax), %ymm7, %ymm10
vpxor 96(%rax), %ymm8, %ymm11
vpxor -96(%rdi), %ymm9, %ymm12
vpxor (%rax), %ymm5, %ymm13
vpxor 96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 96(%rcx)
# Round 21
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rdi), %ymm6, %ymm11
vpxor 64(%rax), %ymm7, %ymm12
vpxor 128(%rax), %ymm8, %ymm13
vpxor 96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 672(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 1
vpxor 128(%rdi), %ymm8, %ymm10
vpxor -64(%rax), %ymm9, %ymm11
vpxor -96(%rcx), %ymm5, %ymm12
vpxor 128(%rcx), %ymm6, %ymm13
vpxor -96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 2
vpxor -32(%rax), %ymm6, %ymm10
vpxor -64(%rcx), %ymm7, %ymm11
vpxor (%rcx), %ymm8, %ymm12
vpxor -64(%rdi), %ymm9, %ymm13
vpxor -96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 3
vpxor -32(%rcx), %ymm9, %ymm10
vpxor 32(%rcx), %ymm5, %ymm11
vpxor -32(%rdi), %ymm6, %ymm12
vpxor 32(%rdi), %ymm7, %ymm13
vpxor (%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, (%rax)
# Row 4
vpxor 64(%rcx), %ymm7, %ymm10
vpxor (%rdi), %ymm8, %ymm11
vpxor 64(%rdi), %ymm9, %ymm12
vpxor 32(%rax), %ymm5, %ymm13
vpxor 96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rax)
# Round 22
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm1, %ymm11
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rax), %ymm6, %ymm11
vpxor (%rcx), %ymm7, %ymm12
vpxor 32(%rdi), %ymm8, %ymm13
vpxor 96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 704(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 1
vpxor 128(%rax), %ymm8, %ymm10
vpxor -96(%rdi), %ymm9, %ymm11
vpxor -32(%rax), %ymm5, %ymm12
vpxor 32(%rcx), %ymm6, %ymm13
vpxor 64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 2
vpxor 96(%rdi), %ymm6, %ymm10
vpxor -96(%rcx), %ymm7, %ymm11
vpxor -64(%rdi), %ymm8, %ymm12
vpxor (%rax), %ymm9, %ymm13
vpxor 64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 3
vpxor 96(%rcx), %ymm9, %ymm10
vpxor 128(%rdi), %ymm5, %ymm11
vpxor -64(%rcx), %ymm6, %ymm12
vpxor -32(%rdi), %ymm7, %ymm13
vpxor 32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 4
vpxor 64(%rax), %ymm7, %ymm10
vpxor 128(%rcx), %ymm8, %ymm11
vpxor -96(%rax), %ymm9, %ymm12
vpxor -32(%rcx), %ymm5, %ymm13
vpxor (%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rdi)
# Round 23
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm4, %ymm14
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rdi), %ymm6, %ymm11
vpxor -64(%rdi), %ymm7, %ymm12
vpxor -32(%rdi), %ymm8, %ymm13
vpxor (%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 736(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 1
vpxor 32(%rdi), %ymm8, %ymm10
vpxor 64(%rdi), %ymm9, %ymm11
vpxor 96(%rdi), %ymm5, %ymm12
vpxor 128(%rdi), %ymm6, %ymm13
vpxor -96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 2
vpxor -64(%rax), %ymm6, %ymm10
vpxor -32(%rax), %ymm7, %ymm11
vpxor (%rax), %ymm8, %ymm12
vpxor 32(%rax), %ymm9, %ymm13
vpxor 64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 64(%rax)
# Row 3
vpxor 96(%rax), %ymm9, %ymm10
vpxor 128(%rax), %ymm5, %ymm11
vpxor -96(%rcx), %ymm6, %ymm12
vpxor -64(%rcx), %ymm7, %ymm13
vpxor -32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 4
vpxor (%rcx), %ymm7, %ymm10
vpxor 32(%rcx), %ymm8, %ymm11
vpxor 64(%rcx), %ymm9, %ymm12
vpxor 96(%rcx), %ymm5, %ymm13
vpxor 128(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 128(%rcx)
subq $0x80, %rdi
vmovdqu %ymm15, (%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_sha3_blocksx4_avx2,.-kyber_sha3_blocksx4_avx2
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_Interrupt/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 32,281
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/poly1305_asm.S
|
/* poly1305_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_X86_64_BUILD
#ifdef HAVE_INTEL_AVX1
#ifndef __APPLE__
.text
.globl poly1305_setkey_avx
.type poly1305_setkey_avx,@function
.align 16
poly1305_setkey_avx:
#else
.section __TEXT,__text
.globl _poly1305_setkey_avx
.p2align 4
_poly1305_setkey_avx:
#endif /* __APPLE__ */
movabsq $0xffffffc0fffffff, %r10
movabsq $0xffffffc0ffffffc, %r11
movq (%rsi), %rdx
movq 8(%rsi), %rax
movq 16(%rsi), %rcx
movq 24(%rsi), %r8
andq %r10, %rdx
andq %r11, %rax
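        # A minimal C sketch (illustrative only) of the clamp applied above:
        # per RFC 8439, fixed bits of the 128-bit multiplier r are cleared
        # so the later multiply/reduce steps cannot overflow.
        /*
        #include <stdint.h>

        static void poly1305_clamp(uint64_t r[2])
        {
            r[0] &= 0x0ffffffc0fffffffULL;
            r[1] &= 0x0ffffffc0ffffffcULL;
        }
        */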
movq %rdx, %r10
movq %rax, %r11
xorq %r9, %r9
movq %rdx, (%rdi)
movq %rax, 8(%rdi)
movq %r9, 24(%rdi)
movq %r9, 32(%rdi)
movq %r9, 40(%rdi)
movq %rcx, 48(%rdi)
movq %r8, 56(%rdi)
movq %r9, 352(%rdi)
movq %r9, 408(%rdi)
movq %rdx, 360(%rdi)
movq %rax, 416(%rdi)
addq %rdx, %r10
addq %rax, %r11
movq %r10, 368(%rdi)
movq %r11, 424(%rdi)
addq %rdx, %r10
addq %rax, %r11
movq %r10, 376(%rdi)
movq %r11, 432(%rdi)
addq %rdx, %r10
addq %rax, %r11
movq %r10, 384(%rdi)
movq %r11, 440(%rdi)
addq %rdx, %r10
addq %rax, %r11
movq %r10, 392(%rdi)
movq %r11, 448(%rdi)
addq %rdx, %r10
addq %rax, %r11
movq %r10, 400(%rdi)
movq %r11, 456(%rdi)
movq %r9, 608(%rdi)
movb $0x01, 616(%rdi)
repz retq
#ifndef __APPLE__
.size poly1305_setkey_avx,.-poly1305_setkey_avx
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl poly1305_block_avx
.type poly1305_block_avx,@function
.align 16
poly1305_block_avx:
#else
.section __TEXT,__text
.globl _poly1305_block_avx
.p2align 4
_poly1305_block_avx:
#endif /* __APPLE__ */
pushq %r15
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
movq (%rdi), %r15
movq 8(%rdi), %rbx
movq 24(%rdi), %r8
movq 32(%rdi), %r9
movq 40(%rdi), %r10
xorq %r14, %r14
movb 616(%rdi), %r14b
# h += m
movq (%rsi), %r11
movq 8(%rsi), %r12
addq %r11, %r8
adcq %r12, %r9
movq %rbx, %rax
adcq %r14, %r10
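        # A hedged C sketch (illustrative only) of this absorb step: the
        # 16-byte block is added into the 130-bit accumulator h, and the
        # padding bit (1 for a full block, loaded from offset 616 above)
        # is carried into the top limb.
        /*
        #include <stdint.h>
        typedef unsigned __int128 u128;   // GCC/Clang extension, sketch only

        static void absorb(uint64_t h[3], const uint64_t m[2], uint64_t pad)
        {
            u128 t = (u128)h[0] + m[0];
            h[0] = (uint64_t)t;
            t = (u128)h[1] + m[1] + (uint64_t)(t >> 64);
            h[1] = (uint64_t)t;
            h[2] += pad + (uint64_t)(t >> 64);
        }
        */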
# r[1] * h[0] => rdx, rax ==> t2, t1
mulq %r8
movq %rax, %r12
movq %rdx, %r13
# r[0] * h[1] => rdx, rax ++> t2, t1
movq %r15, %rax
mulq %r9
addq %rax, %r12
movq %r15, %rax
adcq %rdx, %r13
# r[0] * h[0] => rdx, rax ==> t4, t0
mulq %r8
movq %rax, %r11
movq %rdx, %r8
# r[1] * h[1] => rdx, rax =+> t3, t2
movq %rbx, %rax
mulq %r9
# r[0] * h[2] +> t2
addq 352(%rdi,%r10,8), %r13
movq %rdx, %r14
addq %r8, %r12
adcq %rax, %r13
# r[1] * h[2] +> t3
adcq 408(%rdi,%r10,8), %r14
# r * h in r14, r13, r12, r11
# h = (r * h) mod 2^130 - 5
movq %r13, %r10
andq $-4, %r13
andq $3, %r10
addq %r13, %r11
movq %r13, %r8
adcq %r14, %r12
adcq $0x00, %r10
shrdq $2, %r14, %r8
shrq $2, %r14
addq %r11, %r8
adcq %r14, %r12
movq %r12, %r9
adcq $0x00, %r10
# h in r10, r9, r8
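        # A simplified scalar sketch (illustrative only) of the partial
        # reduction above: the bits of the product at and above 2^130 are
        # folded back in with weight 5, since 2^130 == 5 (mod 2^130 - 5).
        # The assembly avoids the multiply by adding c and 4c separately.
        /*
        #include <stdint.h>
        typedef unsigned __int128 u128;   // GCC/Clang extension, sketch only

        static void reduce_130_5(uint64_t h[3]) // top limb assumed small
        {
            uint64_t c = (h[2] >> 2) * 5; // fold bits >= 2^130, weight 5
            h[2] &= 3;
            u128 t = (u128)h[0] + c;
            h[0] = (uint64_t)t;
            t = (u128)h[1] + (uint64_t)(t >> 64);
            h[1] = (uint64_t)t;
            h[2] += (uint64_t)(t >> 64);
        }
        */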
# Store h to ctx
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
popq %r14
popq %r13
popq %r12
popq %rbx
popq %r15
repz retq
#ifndef __APPLE__
.size poly1305_block_avx,.-poly1305_block_avx
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl poly1305_blocks_avx
.type poly1305_blocks_avx,@function
.align 16
poly1305_blocks_avx:
#else
.section __TEXT,__text
.globl _poly1305_blocks_avx
.p2align 4
_poly1305_blocks_avx:
#endif /* __APPLE__ */
pushq %r15
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
movq %rdx, %rcx
movq (%rdi), %r15
movq 8(%rdi), %rbx
movq 24(%rdi), %r8
movq 32(%rdi), %r9
movq 40(%rdi), %r10
L_poly1305_avx_blocks_start:
# h += m
movq (%rsi), %r11
movq 8(%rsi), %r12
addq %r11, %r8
adcq %r12, %r9
movq %rbx, %rax
adcq $0x00, %r10
# r[1] * h[0] => rdx, rax ==> t2, t1
mulq %r8
movq %rax, %r12
movq %rdx, %r13
# r[0] * h[1] => rdx, rax ++> t2, t1
movq %r15, %rax
mulq %r9
addq %rax, %r12
movq %r15, %rax
adcq %rdx, %r13
# r[0] * h[0] => rdx, rax ==> t4, t0
mulq %r8
movq %rax, %r11
movq %rdx, %r8
# r[1] * h[1] => rdx, rax =+> t3, t2
movq %rbx, %rax
mulq %r9
# r[0] * h[2] +> t2
addq 360(%rdi,%r10,8), %r13
movq %rdx, %r14
addq %r8, %r12
adcq %rax, %r13
# r[1] * h[2] +> t3
adcq 416(%rdi,%r10,8), %r14
# r * h in r14, r13, r12, r11
# h = (r * h) mod 2^130 - 5
movq %r13, %r10
andq $-4, %r13
andq $3, %r10
addq %r13, %r11
movq %r13, %r8
adcq %r14, %r12
adcq $0x00, %r10
shrdq $2, %r14, %r8
shrq $2, %r14
addq %r11, %r8
adcq %r14, %r12
movq %r12, %r9
adcq $0x00, %r10
# h in r10, r9, r8
# Next block from message
addq $16, %rsi
subq $16, %rcx
jg L_poly1305_avx_blocks_start
# Store h to ctx
movq %r8, 24(%rdi)
movq %r9, 32(%rdi)
movq %r10, 40(%rdi)
popq %r14
popq %r13
popq %r12
popq %rbx
popq %r15
repz retq
#ifndef __APPLE__
.size poly1305_blocks_avx,.-poly1305_blocks_avx
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl poly1305_final_avx
.type poly1305_final_avx,@function
.align 16
poly1305_final_avx:
#else
.section __TEXT,__text
.globl _poly1305_final_avx
.p2align 4
_poly1305_final_avx:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
movq %rsi, %rbx
movq 608(%rdi), %rax
testq %rax, %rax
je L_poly1305_avx_final_no_more
movb $0x01, 480(%rdi,%rax,1)
jmp L_poly1305_avx_final_cmp_rem
L_poly1305_avx_final_zero_rem:
movb $0x00, 480(%rdi,%rax,1)
L_poly1305_avx_final_cmp_rem:
incb %al
cmpq $16, %rax
jl L_poly1305_avx_final_zero_rem
movb $0x00, 616(%rdi)
leaq 480(%rdi), %rsi
#ifndef __APPLE__
callq poly1305_block_avx@plt
#else
callq _poly1305_block_avx
#endif /* __APPLE__ */
L_poly1305_avx_final_no_more:
movq 24(%rdi), %rax
movq 32(%rdi), %rdx
movq 40(%rdi), %rcx
movq 48(%rdi), %r11
movq 56(%rdi), %r12
        # h %= p (p = 2^130 - 5)
        # h = h + pad (the result is truncated to 128 bits below)
movq %rcx, %r8
andq $3, %rcx
shrq $2, %r8
# Multiply by 5
leaq 0(%r8,%r8,4), %r8
addq %r8, %rax
adcq $0x00, %rdx
adcq $0x00, %rcx
        # Fixup when h is between (1 << 130) - 5 and (1 << 130) - 1
movq %rax, %r8
movq %rdx, %r9
movq %rcx, %r10
addq $5, %r8
adcq $0x00, %r9
adcq $0x00, %r10
cmpq $4, %r10
cmoveq %r8, %rax
cmoveq %r9, %rdx
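        # A hedged C sketch (illustrative only) of the fixup above: if
        # h >= p (p = 2^130 - 5), then h + 5 carries into bit 130, and the
        # cmov selects the reduced low limbs; the top limb no longer
        # matters once the 128-bit tag is formed.
        /*
        #include <stdint.h>

        static void final_fixup(uint64_t h[3]) // requires h < 2^130
        {
            uint64_t g0 = h[0] + 5, c = (g0 < 5);
            uint64_t g1 = h[1] + c;      c = (g1 < c);
            uint64_t g2 = h[2] + c;
            if (g2 == 4) {               // carry reached bit 130: h >= p
                h[0] = g0;               // h - p, modulo 2^128
                h[1] = g1;
            }
        }
        */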
# h += pad
addq %r11, %rax
adcq %r12, %rdx
movq %rax, (%rbx)
movq %rdx, 8(%rbx)
# Zero out r
movq $0x00, (%rdi)
movq $0x00, 8(%rdi)
# Zero out h
movq $0x00, 24(%rdi)
movq $0x00, 32(%rdi)
movq $0x00, 40(%rdi)
# Zero out pad
movq $0x00, 48(%rdi)
movq $0x00, 56(%rdi)
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size poly1305_final_avx,.-poly1305_final_avx
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.text
.globl poly1305_calc_powers_avx2
.type poly1305_calc_powers_avx2,@function
.align 16
poly1305_calc_powers_avx2:
#else
.section __TEXT,__text
.globl _poly1305_calc_powers_avx2
.p2align 4
_poly1305_calc_powers_avx2:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rbp
movq (%rdi), %rcx
movq 8(%rdi), %r8
xorq %r9, %r9
# Convert to 26 bits in 32
movq %rcx, %rax
movq %rcx, %rdx
movq %rcx, %rsi
movq %r8, %rbx
movq %r8, %rbp
shrq $26, %rdx
shrdq $52, %r8, %rsi
shrq $14, %rbx
shrdq $40, %r9, %rbp
andq $0x3ffffff, %rax
andq $0x3ffffff, %rdx
andq $0x3ffffff, %rsi
andq $0x3ffffff, %rbx
andq $0x3ffffff, %rbp
movl %eax, 224(%rdi)
movl %edx, 228(%rdi)
movl %esi, 232(%rdi)
movl %ebx, 236(%rdi)
movl %ebp, 240(%rdi)
movl $0x00, 244(%rdi)
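        # A minimal C sketch (illustrative only) of the radix conversion
        # above: a value held in 64-bit limbs (t0, t1, with t2 carrying the
        # top bits) is split into five 26-bit digits, one per 32-bit word.
        /*
        #include <stdint.h>

        static void to_base26(uint32_t d[5], uint64_t t0, uint64_t t1,
                              uint64_t t2)
        {
            const uint64_t M = 0x3ffffff;
            d[0] = (uint32_t)( t0                       & M);
            d[1] = (uint32_t)((t0 >> 26)                & M);
            d[2] = (uint32_t)(((t0 >> 52) | (t1 << 12)) & M); // shrdq $52
            d[3] = (uint32_t)((t1 >> 14)                & M);
            d[4] = (uint32_t)(((t1 >> 40) | (t2 << 24)) & M); // shrdq $40
        }
        */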
# Square 128-bit
movq %r8, %rax
mulq %rcx
xorq %r13, %r13
movq %rax, %r11
movq %rdx, %r12
addq %rax, %r11
adcq %rdx, %r12
adcq $0x00, %r13
movq %rcx, %rax
mulq %rax
movq %rax, %r10
movq %rdx, %r15
movq %r8, %rax
mulq %rax
addq %r15, %r11
adcq %rax, %r12
adcq %rdx, %r13
# Reduce 256-bit to 130-bit
movq %r12, %rax
movq %r13, %rdx
andq $-4, %rax
andq $3, %r12
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
shrdq $2, %rdx, %rax
shrq $2, %rdx
addq %rax, %r10
adcq %rdx, %r11
adcq $0x00, %r12
movq %r12, %rax
shrq $2, %rax
leaq 0(%rax,%rax,4), %rax
andq $3, %r12
addq %rax, %r10
adcq $0x00, %r11
adcq $0x00, %r12
# Convert to 26 bits in 32
movq %r10, %rax
movq %r10, %rdx
movq %r10, %rsi
movq %r11, %rbx
movq %r11, %rbp
shrq $26, %rdx
shrdq $52, %r11, %rsi
shrq $14, %rbx
shrdq $40, %r12, %rbp
andq $0x3ffffff, %rax
andq $0x3ffffff, %rdx
andq $0x3ffffff, %rsi
andq $0x3ffffff, %rbx
andq $0x3ffffff, %rbp
movl %eax, 256(%rdi)
movl %edx, 260(%rdi)
movl %esi, 264(%rdi)
movl %ebx, 268(%rdi)
movl %ebp, 272(%rdi)
movl $0x00, 276(%rdi)
# Multiply 128-bit by 130-bit
# r1[0] * r2[0]
movq %rcx, %rax
mulq %r10
movq %rax, %r13
movq %rdx, %r14
# r1[0] * r2[1]
movq %rcx, %rax
mulq %r11
movq $0x00, %r15
addq %rax, %r14
adcq %rdx, %r15
# r1[1] * r2[0]
movq %r8, %rax
mulq %r10
movq $0x00, %rsi
addq %rax, %r14
adcq %rdx, %r15
adcq $0x00, %rsi
# r1[0] * r2[2]
movq %rcx, %rax
mulq %r12
addq %rax, %r15
adcq %rdx, %rsi
# r1[1] * r2[1]
movq %r8, %rax
mulq %r11
movq $0x00, %rbx
addq %rax, %r15
adcq %rdx, %rsi
adcq $0x00, %rbx
# r1[1] * r2[2]
movq %r8, %rax
mulq %r12
addq %rax, %rsi
adcq %rdx, %rbx
# Reduce 260-bit to 130-bit
movq %r15, %rax
movq %rsi, %rdx
movq %rbx, %rbx
andq $-4, %rax
andq $3, %r15
addq %rax, %r13
adcq %rdx, %r14
adcq %rbx, %r15
shrdq $2, %rdx, %rax
shrdq $2, %rbx, %rdx
shrq $2, %rbx
addq %rax, %r13
adcq %rdx, %r14
adcq %rbx, %r15
movq %r15, %rax
andq $3, %r15
shrq $2, %rax
leaq 0(%rax,%rax,4), %rax
addq %rax, %r13
adcq $0x00, %r14
adcq $0x00, %r15
# Convert to 26 bits in 32
movq %r13, %rax
movq %r13, %rdx
movq %r13, %rsi
movq %r14, %rbx
movq %r14, %rbp
shrq $26, %rdx
shrdq $52, %r14, %rsi
shrq $14, %rbx
shrdq $40, %r15, %rbp
andq $0x3ffffff, %rax
andq $0x3ffffff, %rdx
andq $0x3ffffff, %rsi
andq $0x3ffffff, %rbx
andq $0x3ffffff, %rbp
movl %eax, 288(%rdi)
movl %edx, 292(%rdi)
movl %esi, 296(%rdi)
movl %ebx, 300(%rdi)
movl %ebp, 304(%rdi)
movl $0x00, 308(%rdi)
# Square 130-bit
movq %r11, %rax
mulq %r10
xorq %r13, %r13
movq %rax, %r8
movq %rdx, %r9
addq %rax, %r8
adcq %rdx, %r9
adcq $0x00, %r13
movq %r10, %rax
mulq %rax
movq %rax, %rcx
movq %rdx, %r15
movq %r11, %rax
mulq %rax
addq %r15, %r8
adcq %rax, %r9
adcq %rdx, %r13
movq %r12, %rax
mulq %rax
movq %rax, %r14
movq %r12, %rax
mulq %r10
addq %rax, %r9
adcq %rdx, %r13
adcq $0x00, %r14
addq %rax, %r9
adcq %rdx, %r13
adcq $0x00, %r14
movq %r12, %rax
mulq %r11
addq %rax, %r13
adcq %rdx, %r14
addq %rax, %r13
adcq %rdx, %r14
# Reduce 260-bit to 130-bit
movq %r9, %rax
movq %r13, %rdx
movq %r14, %r15
andq $-4, %rax
andq $3, %r9
addq %rax, %rcx
adcq %rdx, %r8
adcq %r15, %r9
shrdq $2, %rdx, %rax
shrdq $2, %r15, %rdx
shrq $2, %r15
addq %rax, %rcx
adcq %rdx, %r8
adcq %r15, %r9
movq %r9, %rax
andq $3, %r9
shrq $2, %rax
leaq 0(%rax,%rax,4), %rax
addq %rax, %rcx
adcq $0x00, %r8
adcq $0x00, %r9
# Convert to 26 bits in 32
movq %rcx, %rax
movq %rcx, %rdx
movq %rcx, %rsi
movq %r8, %rbx
movq %r8, %rbp
shrq $26, %rdx
shrdq $52, %r8, %rsi
shrq $14, %rbx
shrdq $40, %r9, %rbp
andq $0x3ffffff, %rax
andq $0x3ffffff, %rdx
andq $0x3ffffff, %rsi
andq $0x3ffffff, %rbx
andq $0x3ffffff, %rbp
movl %eax, 320(%rdi)
movl %edx, 324(%rdi)
movl %esi, 328(%rdi)
movl %ebx, 332(%rdi)
movl %ebp, 336(%rdi)
movl $0x00, 340(%rdi)
popq %rbp
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size poly1305_calc_powers_avx2,.-poly1305_calc_powers_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl poly1305_setkey_avx2
.type poly1305_setkey_avx2,@function
.align 16
poly1305_setkey_avx2:
#else
.section __TEXT,__text
.globl _poly1305_setkey_avx2
.p2align 4
_poly1305_setkey_avx2:
#endif /* __APPLE__ */
#ifndef __APPLE__
callq poly1305_setkey_avx@plt
#else
callq _poly1305_setkey_avx
#endif /* __APPLE__ */
vpxor %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm0, 160(%rdi)
vmovdqu %ymm0, 192(%rdi)
movq $0x00, 608(%rdi)
movw $0x00, 616(%rdi)
repz retq
#ifndef __APPLE__
.size poly1305_setkey_avx2,.-poly1305_setkey_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_poly1305_avx2_blocks_mask:
.quad 0x3ffffff, 0x3ffffff
.quad 0x3ffffff, 0x3ffffff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_poly1305_avx2_blocks_hibit:
.quad 0x1000000, 0x1000000
.quad 0x1000000, 0x1000000
#ifndef __APPLE__
.text
.globl poly1305_blocks_avx2
.type poly1305_blocks_avx2,@function
.align 16
poly1305_blocks_avx2:
#else
.section __TEXT,__text
.globl _poly1305_blocks_avx2
.p2align 4
_poly1305_blocks_avx2:
#endif /* __APPLE__ */
pushq %r12
pushq %rbx
pushq %r13
pushq %r14
subq $0x140, %rsp
leaq L_poly1305_avx2_blocks_mask(%rip), %r13
leaq L_poly1305_avx2_blocks_hibit(%rip), %r14
movq %rsp, %rcx
andq $-32, %rcx
addq $32, %rcx
vpxor %ymm15, %ymm15, %ymm15
movq %rcx, %rbx
leaq 64(%rdi), %rax
addq $0xa0, %rbx
cmpw $0x00, 616(%rdi)
jne L_poly1305_avx2_blocks_begin_h
# Load the message data
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vperm2i128 $32, %ymm1, %ymm0, %ymm2
vperm2i128 $49, %ymm1, %ymm0, %ymm0
vpunpckldq %ymm0, %ymm2, %ymm1
vpunpckhdq %ymm0, %ymm2, %ymm3
vpunpckldq %ymm15, %ymm1, %ymm0
vpunpckhdq %ymm15, %ymm1, %ymm1
vpunpckldq %ymm15, %ymm3, %ymm2
vpunpckhdq %ymm15, %ymm3, %ymm3
vmovdqu (%r14), %ymm4
vpsllq $6, %ymm1, %ymm1
vpsllq $12, %ymm2, %ymm2
vpsllq $18, %ymm3, %ymm3
vmovdqu (%r13), %ymm14
# Reduce, in place, the message data
vpsrlq $26, %ymm0, %ymm10
vpsrlq $26, %ymm3, %ymm11
vpand %ymm14, %ymm0, %ymm0
vpand %ymm14, %ymm3, %ymm3
vpaddq %ymm1, %ymm10, %ymm1
vpaddq %ymm4, %ymm11, %ymm4
vpsrlq $26, %ymm1, %ymm10
vpsrlq $26, %ymm4, %ymm11
vpand %ymm14, %ymm1, %ymm1
vpand %ymm14, %ymm4, %ymm4
vpaddq %ymm2, %ymm10, %ymm2
vpslld $2, %ymm11, %ymm12
vpaddd %ymm12, %ymm11, %ymm12
vpsrlq $26, %ymm2, %ymm10
vpaddq %ymm0, %ymm12, %ymm0
vpsrlq $26, %ymm0, %ymm11
vpand %ymm14, %ymm2, %ymm2
vpand %ymm14, %ymm0, %ymm0
vpaddq %ymm3, %ymm10, %ymm3
vpaddq %ymm1, %ymm11, %ymm1
vpsrlq $26, %ymm3, %ymm10
vpand %ymm14, %ymm3, %ymm3
vpaddq %ymm4, %ymm10, %ymm4
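        # A scalar C sketch (illustrative only) of one lazy-carry pass as
        # vectorized above: carries ripple through the 26-bit limbs, and
        # overflow out of the top limb wraps to limb 0 with weight 5,
        # computed as c + (c << 2). The vector code interleaves two carry
        # chains; the net effect is the same.
        /*
        #include <stdint.h>

        static void carry_pass(uint64_t h[5])
        {
            const uint64_t M = 0x3ffffff;
            uint64_t c;
            c = h[0] >> 26; h[0] &= M; h[1] += c;
            c = h[1] >> 26; h[1] &= M; h[2] += c;
            c = h[2] >> 26; h[2] &= M; h[3] += c;
            c = h[3] >> 26; h[3] &= M; h[4] += c;
            c = h[4] >> 26; h[4] &= M; h[0] += c + (c << 2); // c * 5
            c = h[0] >> 26; h[0] &= M; h[1] += c;
        }
        */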
addq $0x40, %rsi
subq $0x40, %rdx
jz L_poly1305_avx2_blocks_store
jmp L_poly1305_avx2_blocks_load_r4
L_poly1305_avx2_blocks_begin_h:
# Load the H values.
vmovdqu (%rax), %ymm0
vmovdqu 32(%rax), %ymm1
vmovdqu 64(%rax), %ymm2
vmovdqu 96(%rax), %ymm3
vmovdqu 128(%rax), %ymm4
# Check if there is a power of r to load - otherwise use r^4.
cmpb $0x00, 616(%rdi)
je L_poly1305_avx2_blocks_load_r4
# Load the 4 powers of r - r^4, r^3, r^2, r^1.
vmovdqu 224(%rdi), %ymm8
vmovdqu 256(%rdi), %ymm7
vmovdqu 288(%rdi), %ymm6
vmovdqu 320(%rdi), %ymm5
vpermq $0xd8, %ymm5, %ymm5
vpermq $0xd8, %ymm6, %ymm6
vpermq $0xd8, %ymm7, %ymm7
vpermq $0xd8, %ymm8, %ymm8
vpunpcklqdq %ymm6, %ymm5, %ymm10
vpunpckhqdq %ymm6, %ymm5, %ymm11
vpunpcklqdq %ymm8, %ymm7, %ymm12
vpunpckhqdq %ymm8, %ymm7, %ymm13
vperm2i128 $32, %ymm12, %ymm10, %ymm5
vperm2i128 $49, %ymm12, %ymm10, %ymm7
vperm2i128 $32, %ymm13, %ymm11, %ymm9
vpsrlq $32, %ymm5, %ymm6
vpsrlq $32, %ymm7, %ymm8
jmp L_poly1305_avx2_blocks_mul_5
L_poly1305_avx2_blocks_load_r4:
# Load r^4 into all four positions.
vmovdqu 320(%rdi), %ymm13
vpermq $0x00, %ymm13, %ymm5
vpsrlq $32, %ymm13, %ymm14
vpermq $0x55, %ymm13, %ymm7
vpermq $0xaa, %ymm13, %ymm9
vpermq $0x00, %ymm14, %ymm6
vpermq $0x55, %ymm14, %ymm8
L_poly1305_avx2_blocks_mul_5:
# Multiply top 4 26-bit values of all four H by 5
vpslld $2, %ymm6, %ymm10
vpslld $2, %ymm7, %ymm11
vpslld $2, %ymm8, %ymm12
vpslld $2, %ymm9, %ymm13
vpaddq %ymm10, %ymm6, %ymm10
vpaddq %ymm11, %ymm7, %ymm11
vpaddq %ymm12, %ymm8, %ymm12
vpaddq %ymm13, %ymm9, %ymm13
# Store powers of r and multiple of 5 for use in multiply.
vmovdqa %ymm10, (%rbx)
vmovdqa %ymm11, 32(%rbx)
vmovdqa %ymm12, 64(%rbx)
vmovdqa %ymm13, 96(%rbx)
vmovdqa %ymm5, (%rcx)
vmovdqa %ymm6, 32(%rcx)
vmovdqa %ymm7, 64(%rcx)
vmovdqa %ymm8, 96(%rcx)
vmovdqa %ymm9, 128(%rcx)
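        # A hedged C sketch (illustrative only) of the precomputation above:
        # the upper four base-26 digits of each stored r power are
        # premultiplied by 5 so that product terms landing at or above
        # 2^130 can be folded in during the multiply (lazy reduction).
        /*
        #include <stdint.h>

        static void premul5(uint32_t r5[4], const uint32_t r[5])
        {
            for (int i = 0; i < 4; i++)
                r5[i] = r[i + 1] * 5;     // vpslld $2 + vpaddq: 4*r + r
        }
        */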
vmovdqu (%r13), %ymm14
# If not finished then loop over data
cmpb $0x01, 616(%rdi)
jne L_poly1305_avx2_blocks_start
# Do last multiply, reduce, add the four H together and move to
# 32-bit registers
vpmuludq (%rbx), %ymm4, %ymm5
vpmuludq 32(%rbx), %ymm3, %ymm10
vpmuludq 32(%rbx), %ymm4, %ymm6
vpmuludq 64(%rbx), %ymm3, %ymm11
vpmuludq 64(%rbx), %ymm4, %ymm7
vpaddq %ymm5, %ymm10, %ymm5
vpmuludq 64(%rbx), %ymm2, %ymm12
vpmuludq 96(%rbx), %ymm4, %ymm8
vpaddq %ymm6, %ymm11, %ymm6
vpmuludq 96(%rbx), %ymm1, %ymm13
vpmuludq 96(%rbx), %ymm2, %ymm10
vpaddq %ymm5, %ymm12, %ymm5
vpmuludq 96(%rbx), %ymm3, %ymm11
vpmuludq (%rcx), %ymm3, %ymm12
vpaddq %ymm5, %ymm13, %ymm5
vpmuludq (%rcx), %ymm4, %ymm9
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq (%rcx), %ymm0, %ymm13
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq (%rcx), %ymm1, %ymm10
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq (%rcx), %ymm2, %ymm11
vpmuludq 32(%rcx), %ymm2, %ymm12
vpaddq %ymm5, %ymm13, %ymm5
vpmuludq 32(%rcx), %ymm3, %ymm13
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq 32(%rcx), %ymm0, %ymm10
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq 32(%rcx), %ymm1, %ymm11
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq 64(%rcx), %ymm1, %ymm12
vpaddq %ymm9, %ymm13, %ymm9
vpmuludq 64(%rcx), %ymm2, %ymm13
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq 64(%rcx), %ymm0, %ymm10
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq 96(%rcx), %ymm0, %ymm11
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq 96(%rcx), %ymm1, %ymm12
vpaddq %ymm9, %ymm13, %ymm9
vpaddq %ymm7, %ymm10, %ymm7
vpmuludq 128(%rcx), %ymm0, %ymm13
vpaddq %ymm8, %ymm11, %ymm8
vpaddq %ymm9, %ymm12, %ymm9
vpaddq %ymm9, %ymm13, %ymm9
vpsrlq $26, %ymm5, %ymm10
vpsrlq $26, %ymm8, %ymm11
vpand %ymm14, %ymm5, %ymm5
vpand %ymm14, %ymm8, %ymm8
vpaddq %ymm6, %ymm10, %ymm6
vpaddq %ymm9, %ymm11, %ymm9
vpsrlq $26, %ymm6, %ymm10
vpsrlq $26, %ymm9, %ymm11
vpand %ymm14, %ymm6, %ymm1
vpand %ymm14, %ymm9, %ymm4
vpaddq %ymm7, %ymm10, %ymm7
vpslld $2, %ymm11, %ymm12
vpaddd %ymm12, %ymm11, %ymm12
vpsrlq $26, %ymm7, %ymm10
vpaddq %ymm5, %ymm12, %ymm5
vpsrlq $26, %ymm5, %ymm11
vpand %ymm14, %ymm7, %ymm2
vpand %ymm14, %ymm5, %ymm0
vpaddq %ymm8, %ymm10, %ymm8
vpaddq %ymm1, %ymm11, %ymm1
vpsrlq $26, %ymm8, %ymm10
vpand %ymm14, %ymm8, %ymm3
vpaddq %ymm4, %ymm10, %ymm4
vpsrldq $8, %ymm0, %ymm5
vpsrldq $8, %ymm1, %ymm6
vpsrldq $8, %ymm2, %ymm7
vpsrldq $8, %ymm3, %ymm8
vpsrldq $8, %ymm4, %ymm9
vpaddq %ymm0, %ymm5, %ymm0
vpaddq %ymm1, %ymm6, %ymm1
vpaddq %ymm2, %ymm7, %ymm2
vpaddq %ymm3, %ymm8, %ymm3
vpaddq %ymm4, %ymm9, %ymm4
vpermq $2, %ymm0, %ymm5
vpermq $2, %ymm1, %ymm6
vpermq $2, %ymm2, %ymm7
vpermq $2, %ymm3, %ymm8
vpermq $2, %ymm4, %ymm9
vpaddq %ymm0, %ymm5, %ymm0
vpaddq %ymm1, %ymm6, %ymm1
vpaddq %ymm2, %ymm7, %ymm2
vpaddq %ymm3, %ymm8, %ymm3
vpaddq %ymm4, %ymm9, %ymm4
vmovd %xmm0, %r8d
vmovd %xmm1, %r9d
vmovd %xmm2, %r10d
vmovd %xmm3, %r11d
vmovd %xmm4, %r12d
jmp L_poly1305_avx2_blocks_end_calc
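        # A hedged C sketch (illustrative only) of the fold just performed
        # by the vpsrldq/vpermq/vpaddq sequence: the four parallel
        # accumulators (one per 64-bit lane) are summed, limb by limb, into
        # a single accumulator before the scalar finish.
        /*
        #include <stdint.h>

        static uint64_t fold4(const uint64_t lane[4]) // one limb, 4 lanes
        {
            return lane[0] + lane[1] + lane[2] + lane[3];
        }
        */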
L_poly1305_avx2_blocks_start:
vmovdqu (%rsi), %ymm5
vmovdqu 32(%rsi), %ymm6
vperm2i128 $32, %ymm6, %ymm5, %ymm7
vperm2i128 $49, %ymm6, %ymm5, %ymm5
vpunpckldq %ymm5, %ymm7, %ymm6
vpunpckhdq %ymm5, %ymm7, %ymm8
vpunpckldq %ymm15, %ymm6, %ymm5
vpunpckhdq %ymm15, %ymm6, %ymm6
vpunpckldq %ymm15, %ymm8, %ymm7
vpunpckhdq %ymm15, %ymm8, %ymm8
vmovdqu (%r14), %ymm9
vpsllq $6, %ymm6, %ymm6
vpsllq $12, %ymm7, %ymm7
vpsllq $18, %ymm8, %ymm8
vpmuludq (%rbx), %ymm4, %ymm10
vpaddq %ymm5, %ymm10, %ymm5
vpmuludq 32(%rbx), %ymm3, %ymm10
vpmuludq 32(%rbx), %ymm4, %ymm11
vpaddq %ymm6, %ymm11, %ymm6
vpmuludq 64(%rbx), %ymm3, %ymm11
vpmuludq 64(%rbx), %ymm4, %ymm12
vpaddq %ymm7, %ymm12, %ymm7
vpaddq %ymm5, %ymm10, %ymm5
vpmuludq 64(%rbx), %ymm2, %ymm12
vpmuludq 96(%rbx), %ymm4, %ymm13
vpaddq %ymm8, %ymm13, %ymm8
vpaddq %ymm6, %ymm11, %ymm6
vpmuludq 96(%rbx), %ymm1, %ymm13
vpmuludq 96(%rbx), %ymm2, %ymm10
vpaddq %ymm5, %ymm12, %ymm5
vpmuludq 96(%rbx), %ymm3, %ymm11
vpmuludq (%rcx), %ymm3, %ymm12
vpaddq %ymm5, %ymm13, %ymm5
vpmuludq (%rcx), %ymm4, %ymm13
vpaddq %ymm9, %ymm13, %ymm9
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq (%rcx), %ymm0, %ymm13
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq (%rcx), %ymm1, %ymm10
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq (%rcx), %ymm2, %ymm11
vpmuludq 32(%rcx), %ymm2, %ymm12
vpaddq %ymm5, %ymm13, %ymm5
vpmuludq 32(%rcx), %ymm3, %ymm13
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq 32(%rcx), %ymm0, %ymm10
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq 32(%rcx), %ymm1, %ymm11
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq 64(%rcx), %ymm1, %ymm12
vpaddq %ymm9, %ymm13, %ymm9
vpmuludq 64(%rcx), %ymm2, %ymm13
vpaddq %ymm6, %ymm10, %ymm6
vpmuludq 64(%rcx), %ymm0, %ymm10
vpaddq %ymm7, %ymm11, %ymm7
vpmuludq 96(%rcx), %ymm0, %ymm11
vpaddq %ymm8, %ymm12, %ymm8
vpmuludq 96(%rcx), %ymm1, %ymm12
vpaddq %ymm9, %ymm13, %ymm9
vpaddq %ymm7, %ymm10, %ymm7
vpmuludq 128(%rcx), %ymm0, %ymm13
vpaddq %ymm8, %ymm11, %ymm8
vpaddq %ymm9, %ymm12, %ymm9
vpaddq %ymm9, %ymm13, %ymm9
vpsrlq $26, %ymm5, %ymm10
vpsrlq $26, %ymm8, %ymm11
vpand %ymm14, %ymm5, %ymm5
vpand %ymm14, %ymm8, %ymm8
vpaddq %ymm6, %ymm10, %ymm6
vpaddq %ymm9, %ymm11, %ymm9
vpsrlq $26, %ymm6, %ymm10
vpsrlq $26, %ymm9, %ymm11
vpand %ymm14, %ymm6, %ymm1
vpand %ymm14, %ymm9, %ymm4
vpaddq %ymm7, %ymm10, %ymm7
vpslld $2, %ymm11, %ymm12
vpaddd %ymm12, %ymm11, %ymm12
vpsrlq $26, %ymm7, %ymm10
vpaddq %ymm5, %ymm12, %ymm5
vpsrlq $26, %ymm5, %ymm11
vpand %ymm14, %ymm7, %ymm2
vpand %ymm14, %ymm5, %ymm0
vpaddq %ymm8, %ymm10, %ymm8
vpaddq %ymm1, %ymm11, %ymm1
vpsrlq $26, %ymm8, %ymm10
vpand %ymm14, %ymm8, %ymm3
vpaddq %ymm4, %ymm10, %ymm4
addq $0x40, %rsi
subq $0x40, %rdx
jnz L_poly1305_avx2_blocks_start
L_poly1305_avx2_blocks_store:
# Store four H values - state
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 128(%rax)
L_poly1305_avx2_blocks_end_calc:
cmpb $0x00, 616(%rdi)
je L_poly1305_avx2_blocks_complete
movq %r8, %rax
movq %r10, %rdx
movq %r12, %rcx
shrq $12, %rdx
shrq $24, %rcx
shlq $26, %r9
shlq $52, %r10
shlq $14, %r11
shlq $40, %r12
addq %r9, %rax
adcq %r10, %rax
adcq %r11, %rdx
adcq %r12, %rdx
adcq $0x00, %rcx
movq %rcx, %r8
andq $3, %rcx
shrq $2, %r8
leaq 0(%r8,%r8,4), %r8
addq %r8, %rax
adcq $0x00, %rdx
adcq $0x00, %rcx
movq %rax, 24(%rdi)
movq %rdx, 32(%rdi)
movq %rcx, 40(%rdi)
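        # A hedged C sketch (illustrative only) of the recombination above,
        # assuming fully carried digits (each < 2^26): the five base-26
        # digits are packed back into 64-bit limbs at bit offsets 0, 26,
        # 52, 78 and 104. The assembly uses add-with-carry instead, since
        # its digits may still hold small carries.
        /*
        #include <stdint.h>

        static void from_base26(uint64_t h[3], const uint32_t d[5])
        {
            h[0] = (uint64_t)d[0] | ((uint64_t)d[1] << 26)
                                  | ((uint64_t)d[2] << 52);
            h[1] = ((uint64_t)d[2] >> 12) | ((uint64_t)d[3] << 14)
                                          | ((uint64_t)d[4] << 40);
            h[2] = (uint64_t)d[4] >> 24;  // top 2 bits of the 130-bit value
        }
        */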
L_poly1305_avx2_blocks_complete:
movb $0x01, 617(%rdi)
vzeroupper
addq $0x140, %rsp
popq %r14
popq %r13
popq %rbx
popq %r12
repz retq
#ifndef __APPLE__
.size poly1305_blocks_avx2,.-poly1305_blocks_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl poly1305_final_avx2
.type poly1305_final_avx2,@function
.align 16
poly1305_final_avx2:
#else
.section __TEXT,__text
.globl _poly1305_final_avx2
.p2align 4
_poly1305_final_avx2:
#endif /* __APPLE__ */
movb $0x01, 616(%rdi)
movb 617(%rdi), %cl
cmpb $0x00, %cl
je L_poly1305_avx2_final_done_blocks_X4
pushq %rsi
movq $0x40, %rdx
xorq %rsi, %rsi
#ifndef __APPLE__
callq poly1305_blocks_avx2@plt
#else
callq _poly1305_blocks_avx2
#endif /* __APPLE__ */
popq %rsi
L_poly1305_avx2_final_done_blocks_X4:
movq 608(%rdi), %rax
movq %rax, %rcx
andq $-16, %rcx
cmpb $0x00, %cl
je L_poly1305_avx2_final_done_blocks
pushq %rcx
pushq %rax
pushq %rsi
movq %rcx, %rdx
leaq 480(%rdi), %rsi
#ifndef __APPLE__
callq poly1305_blocks_avx@plt
#else
callq _poly1305_blocks_avx
#endif /* __APPLE__ */
popq %rsi
popq %rax
popq %rcx
L_poly1305_avx2_final_done_blocks:
subq %rcx, 608(%rdi)
xorq %rdx, %rdx
jmp L_poly1305_avx2_final_cmp_copy
L_poly1305_avx2_final_start_copy:
movb 480(%rdi,%rcx,1), %r8b
movb %r8b, 480(%rdi,%rdx,1)
incb %cl
incb %dl
L_poly1305_avx2_final_cmp_copy:
cmpb %cl, %al
jne L_poly1305_avx2_final_start_copy
#ifndef __APPLE__
callq poly1305_final_avx@plt
#else
callq _poly1305_final_avx
#endif /* __APPLE__ */
vpxor %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm0, 160(%rdi)
vmovdqu %ymm0, 192(%rdi)
vmovdqu %ymm0, 224(%rdi)
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm0, 288(%rdi)
vmovdqu %ymm0, 320(%rdi)
movq $0x00, 608(%rdi)
movw $0x00, 616(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size poly1305_final_avx2,.-poly1305_final_avx2
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* WOLFSSL_X86_64_BUILD */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,394
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_AnalogWatchdog/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400;
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200;
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
                DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
                DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
                LDR R0, =Heap_Mem
                LDR R1, =(Stack_Mem + Stack_Size)
                LDR R2, =(Heap_Mem + Heap_Size)
                LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_AnalogWatchdog/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
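/* r1 holds the running byte offset; the bounds test in LoopCopyDataInit
   runs before the first copy, so an empty .data section copies nothing. */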
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, =_ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
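/* The linker script typically places .isr_vector at the start of flash
   (0x08000000 on STM32F0 parts), which the boot configuration aliases to
   address 0x0000.0000. */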
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
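/* For example, an application overrides a weak alias simply by defining a
 * function with the same name. A hypothetical sketch (htim2 is an
 * application-defined TIM2 handle, not part of this file):
 *
 *     #include "stm32f0xx_hal.h"
 *
 *     extern TIM_HandleTypeDef htim2;
 *
 *     void TIM2_IRQHandler(void)
 *     {
 *         HAL_TIM_IRQHandler(&htim2);   // dispatch to the HAL TIM driver
 *     }
 */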
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/COMP/COMP_AnalogWatchdog/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
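;; Each handler below is declared PUBWEAK and branches to itself; an
;; application function with the same name overrides the weak definition
;; at link time. Reset_Handler calls SystemInit and then hands control to
;; the IAR runtime entry __iar_program_start.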
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_Reset/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;*                      After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_Reset/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, =_ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_Reset/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aenu1/aps3e
| 926,889
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/wc_kyber_asm.S
|
/* wc_kyber_asm.S */
/*
* Copyright (C) 2006-2024 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#ifdef WOLFSSL_USER_SETTINGS_ASM
/*
* user_settings_asm.h is a file generated by the script user_settings_asm.sh.
* The script takes in a user_settings.h and produces user_settings_asm.h, which
* is a stripped down version of user_settings.h containing only preprocessor
* directives. This makes the header safe to include in assembly (.S) files.
*/
#include "user_settings_asm.h"
#else
/*
* Note: if user_settings.h contains any C code (e.g. a typedef or function
* prototype), including it here in an assembly (.S) file will cause an
* assembler failure. See user_settings_asm.h above.
*/
#include "user_settings.h"
#endif /* WOLFSSL_USER_SETTINGS_ASM */
#endif /* WOLFSSL_USER_SETTINGS */
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
#ifdef WOLFSSL_WC_KYBER
#ifdef HAVE_INTEL_AVX2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
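/* kyber_q: q = 3329 (0x0d01), the Kyber prime modulus, replicated across
 * all sixteen 16-bit lanes of a 256-bit AVX2 register. */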
kyber_q:
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
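/* kyber_qinv: 62209 (0xf301) = q^-1 mod 2^16 (q * 62209 == 1 mod 2^16),
 * used for 16-bit Montgomery reduction. */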
kyber_qinv:
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
.value 0xf301,0xf301
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
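/* kyber_f: 1353 (0x0549) = 2^32 mod q, the factor that converts a value
 * into the Montgomery domain. */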
kyber_f:
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
.value 0x549,0x549
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
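/* kyber_f_qinv: 20553 (0x5049) = (f * qinv) mod 2^16, the precomputed
 * low-half operand for a Montgomery multiplication by f. */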
kyber_f_qinv:
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
.value 0x5049,0x5049
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
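/* kyber_v: 20159 (0x4ebf) = round(2^26 / q), the Barrett reduction
 * constant. */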
kyber_v:
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
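/* Twiddle factors (zetas) for the AVX2 NTT, in Montgomery form and
 * replicated per 16-bit lane as each butterfly layer requires (16 copies
 * for the outer layers, fewer as the layers split). Each block of plain
 * values is followed by the matching (zeta * qinv) mod 2^16 block, e.g.
 * 0x0a0b = 2571 then 0x7b0b = (2571 * 62209) mod 2^16. With both halves
 * precomputed, a Montgomery product needs only a vpmullw by the qinv
 * half, a vpmulhw by the plain half, and a vpmulhw-by-q correction
 * subtract. */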
L_kyber_avx2_zetas:
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0xa0b,0xa0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0x7b0b,0x7b0b
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0xb9a,0xb9a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x399a,0x399a
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x5d5,0x5d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x34d5,0x34d5
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0x58e,0x58e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xcf8e,0xcf8e
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xc56,0xc56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0xae56,0xae56
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x26e,0x26e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x6c6e,0x6c6e
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0x629,0x629
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xf129,0xf129
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xb6,0xb6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0xc2b6,0xc2b6
.value 0x23d,0x23d
.value 0x23d,0x23d
.value 0x23d,0x23d
.value 0x23d,0x23d
.value 0x7d4,0x7d4
.value 0x7d4,0x7d4
.value 0x7d4,0x7d4
.value 0x7d4,0x7d4
.value 0xe93d,0xe93d
.value 0xe93d,0xe93d
.value 0xe93d,0xe93d
.value 0xe93d,0xe93d
.value 0x43d4,0x43d4
.value 0x43d4,0x43d4
.value 0x43d4,0x43d4
.value 0x43d4,0x43d4
.value 0x108,0x108
.value 0x108,0x108
.value 0x108,0x108
.value 0x108,0x108
.value 0x17f,0x17f
.value 0x17f,0x17f
.value 0x17f,0x17f
.value 0x17f,0x17f
.value 0x9908,0x9908
.value 0x9908,0x9908
.value 0x9908,0x9908
.value 0x9908,0x9908
.value 0x8e7f,0x8e7f
.value 0x8e7f,0x8e7f
.value 0x8e7f,0x8e7f
.value 0x8e7f,0x8e7f
.value 0x4c7,0x4c7
.value 0x4c7,0x4c7
.value 0x28c,0x28c
.value 0x28c,0x28c
.value 0xad9,0xad9
.value 0xad9,0xad9
.value 0x3f7,0x3f7
.value 0x3f7,0x3f7
.value 0xe9c7,0xe9c7
.value 0xe9c7,0xe9c7
.value 0xe68c,0xe68c
.value 0xe68c,0xe68c
.value 0x5d9,0x5d9
.value 0x5d9,0x5d9
.value 0x78f7,0x78f7
.value 0x78f7,0x78f7
.value 0x7f4,0x7f4
.value 0x7f4,0x7f4
.value 0x5d3,0x5d3
.value 0x5d3,0x5d3
.value 0xbe7,0xbe7
.value 0xbe7,0xbe7
.value 0x6f9,0x6f9
.value 0x6f9,0x6f9
.value 0xa3f4,0xa3f4
.value 0xa3f4,0xa3f4
.value 0x4ed3,0x4ed3
.value 0x4ed3,0x4ed3
.value 0x50e7,0x50e7
.value 0x50e7,0x50e7
.value 0x61f9,0x61f9
.value 0x61f9,0x61f9
.value 0x9c4,0x9c4
.value 0x9c4,0x9c4
.value 0x9c4,0x9c4
.value 0x9c4,0x9c4
.value 0x5b2,0x5b2
.value 0x5b2,0x5b2
.value 0x5b2,0x5b2
.value 0x5b2,0x5b2
.value 0x15c4,0x15c4
.value 0x15c4,0x15c4
.value 0x15c4,0x15c4
.value 0x15c4,0x15c4
.value 0xfbb2,0xfbb2
.value 0xfbb2,0xfbb2
.value 0xfbb2,0xfbb2
.value 0xfbb2,0xfbb2
.value 0x6bf,0x6bf
.value 0x6bf,0x6bf
.value 0x6bf,0x6bf
.value 0x6bf,0x6bf
.value 0xc7f,0xc7f
.value 0xc7f,0xc7f
.value 0xc7f,0xc7f
.value 0xc7f,0xc7f
.value 0x53bf,0x53bf
.value 0x53bf,0x53bf
.value 0x53bf,0x53bf
.value 0x53bf,0x53bf
.value 0x997f,0x997f
.value 0x997f,0x997f
.value 0x997f,0x997f
.value 0x997f,0x997f
.value 0x204,0x204
.value 0x204,0x204
.value 0xcf9,0xcf9
.value 0xcf9,0xcf9
.value 0xbc1,0xbc1
.value 0xbc1,0xbc1
.value 0xa67,0xa67
.value 0xa67,0xa67
.value 0xce04,0xce04
.value 0xce04,0xce04
.value 0x67f9,0x67f9
.value 0x67f9,0x67f9
.value 0x3ec1,0x3ec1
.value 0x3ec1,0x3ec1
.value 0xcf67,0xcf67
.value 0xcf67,0xcf67
.value 0x6af,0x6af
.value 0x6af,0x6af
.value 0x877,0x877
.value 0x877,0x877
.value 0x7e,0x7e
.value 0x7e,0x7e
.value 0x5bd,0x5bd
.value 0x5bd,0x5bd
.value 0x23af,0x23af
.value 0x23af,0x23af
.value 0xfd77,0xfd77
.value 0xfd77,0xfd77
.value 0x9a7e,0x9a7e
.value 0x9a7e,0x9a7e
.value 0x6cbd,0x6cbd
.value 0x6cbd,0x6cbd
.value 0x8b2,0x8b2
.value 0x1ae,0x1ae
.value 0x22b,0x22b
.value 0x34b,0x34b
.value 0x81e,0x81e
.value 0x367,0x367
.value 0x60e,0x60e
.value 0x69,0x69
.value 0xfeb2,0xfeb2
.value 0x2bae,0x2bae
.value 0xd32b,0xd32b
.value 0x344b,0x344b
.value 0x821e,0x821e
.value 0xc867,0xc867
.value 0x500e,0x500e
.value 0xab69,0xab69
.value 0x1a6,0x1a6
.value 0x24b,0x24b
.value 0xb1,0xb1
.value 0xc16,0xc16
.value 0xbde,0xbde
.value 0xb35,0xb35
.value 0x626,0x626
.value 0x675,0x675
.value 0x93a6,0x93a6
.value 0x334b,0x334b
.value 0x3b1,0x3b1
.value 0xee16,0xee16
.value 0xc5de,0xc5de
.value 0x5a35,0x5a35
.value 0x1826,0x1826
.value 0x1575,0x1575
.value 0xc0b,0xc0b
.value 0x30a,0x30a
.value 0x487,0x487
.value 0xc6e,0xc6e
.value 0x9f8,0x9f8
.value 0x5cb,0x5cb
.value 0xaa7,0xaa7
.value 0x45f,0x45f
.value 0x7d0b,0x7d0b
.value 0x810a,0x810a
.value 0x2987,0x2987
.value 0x766e,0x766e
.value 0x71f8,0x71f8
.value 0xb6cb,0xb6cb
.value 0x8fa7,0x8fa7
.value 0x315f,0x315f
.value 0x6cb,0x6cb
.value 0x284,0x284
.value 0x999,0x999
.value 0x15d,0x15d
.value 0x1a2,0x1a2
.value 0x149,0x149
.value 0xc65,0xc65
.value 0xcb6,0xcb6
.value 0xb7cb,0xb7cb
.value 0x4e84,0x4e84
.value 0x4499,0x4499
.value 0x485d,0x485d
.value 0xc7a2,0xc7a2
.value 0x4c49,0x4c49
.value 0xeb65,0xeb65
.value 0xceb6,0xceb6
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x714,0x714
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x314,0x314
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x11f,0x11f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0x6e1f,0x6e1f
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xca,0xca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0xbeca,0xbeca
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x3c2,0x3c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x29c2,0x29c2
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x84f,0x84f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x54f,0x54f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0x73f,0x73f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0xd43f,0xd43f
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x5bc,0x5bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0x79bc,0x79bc
.value 0xa58,0xa58
.value 0xa58,0xa58
.value 0xa58,0xa58
.value 0xa58,0xa58
.value 0x3f9,0x3f9
.value 0x3f9,0x3f9
.value 0x3f9,0x3f9
.value 0x3f9,0x3f9
.value 0x9258,0x9258
.value 0x9258,0x9258
.value 0x9258,0x9258
.value 0x9258,0x9258
.value 0x5ef9,0x5ef9
.value 0x5ef9,0x5ef9
.value 0x5ef9,0x5ef9
.value 0x5ef9,0x5ef9
.value 0x2dc,0x2dc
.value 0x2dc,0x2dc
.value 0x2dc,0x2dc
.value 0x2dc,0x2dc
.value 0x260,0x260
.value 0x260,0x260
.value 0x260,0x260
.value 0x260,0x260
.value 0xd6dc,0xd6dc
.value 0xd6dc,0xd6dc
.value 0xd6dc,0xd6dc
.value 0xd6dc,0xd6dc
.value 0x2260,0x2260
.value 0x2260,0x2260
.value 0x2260,0x2260
.value 0x2260,0x2260
.value 0x9ac,0x9ac
.value 0x9ac,0x9ac
.value 0xca7,0xca7
.value 0xca7,0xca7
.value 0xbf2,0xbf2
.value 0xbf2,0xbf2
.value 0x33e,0x33e
.value 0x33e,0x33e
.value 0x4dac,0x4dac
.value 0x4dac,0x4dac
.value 0x91a7,0x91a7
.value 0x91a7,0x91a7
.value 0xc1f2,0xc1f2
.value 0xc1f2,0xc1f2
.value 0xdd3e,0xdd3e
.value 0xdd3e,0xdd3e
.value 0x6b,0x6b
.value 0x6b,0x6b
.value 0x774,0x774
.value 0x774,0x774
.value 0xc0a,0xc0a
.value 0xc0a,0xc0a
.value 0x94a,0x94a
.value 0x94a,0x94a
.value 0x916b,0x916b
.value 0x916b,0x916b
.value 0x2374,0x2374
.value 0x2374,0x2374
.value 0x8a0a,0x8a0a
.value 0x8a0a,0x8a0a
.value 0x474a,0x474a
.value 0x474a,0x474a
.value 0x6fb,0x6fb
.value 0x6fb,0x6fb
.value 0x6fb,0x6fb
.value 0x6fb,0x6fb
.value 0x19b,0x19b
.value 0x19b,0x19b
.value 0x19b,0x19b
.value 0x19b,0x19b
.value 0x47fb,0x47fb
.value 0x47fb,0x47fb
.value 0x47fb,0x47fb
.value 0x47fb,0x47fb
.value 0x229b,0x229b
.value 0x229b,0x229b
.value 0x229b,0x229b
.value 0x229b,0x229b
.value 0xc34,0xc34
.value 0xc34,0xc34
.value 0xc34,0xc34
.value 0xc34,0xc34
.value 0x6de,0x6de
.value 0x6de,0x6de
.value 0x6de,0x6de
.value 0x6de,0x6de
.value 0x6834,0x6834
.value 0x6834,0x6834
.value 0x6834,0x6834
.value 0x6834,0x6834
.value 0xc0de,0xc0de
.value 0xc0de,0xc0de
.value 0xc0de,0xc0de
.value 0xc0de,0xc0de
.value 0xb73,0xb73
.value 0xb73,0xb73
.value 0x3c1,0x3c1
.value 0x3c1,0x3c1
.value 0x71d,0x71d
.value 0x71d,0x71d
.value 0xa2c,0xa2c
.value 0xa2c,0xa2c
.value 0x3473,0x3473
.value 0x3473,0x3473
.value 0x36c1,0x36c1
.value 0x36c1,0x36c1
.value 0x8e1d,0x8e1d
.value 0x8e1d,0x8e1d
.value 0xce2c,0xce2c
.value 0xce2c,0xce2c
.value 0x1c0,0x1c0
.value 0x1c0,0x1c0
.value 0x8d8,0x8d8
.value 0x8d8,0x8d8
.value 0x2a5,0x2a5
.value 0x2a5,0x2a5
.value 0x806,0x806
.value 0x806,0x806
.value 0x41c0,0x41c0
.value 0x41c0,0x41c0
.value 0x10d8,0x10d8
.value 0x10d8,0x10d8
.value 0xa1a5,0xa1a5
.value 0xa1a5,0xa1a5
.value 0xba06,0xba06
.value 0xba06,0xba06
.value 0x331,0x331
.value 0x449,0x449
.value 0x25b,0x25b
.value 0x262,0x262
.value 0x52a,0x52a
.value 0x7fc,0x7fc
.value 0x748,0x748
.value 0x180,0x180
.value 0x8631,0x8631
.value 0x4f49,0x4f49
.value 0x635b,0x635b
.value 0x862,0x862
.value 0xe32a,0xe32a
.value 0x3bfc,0x3bfc
.value 0x5f48,0x5f48
.value 0x8180,0x8180
.value 0x842,0x842
.value 0xc79,0xc79
.value 0x4c2,0x4c2
.value 0x7ca,0x7ca
.value 0x997,0x997
.value 0xdc,0xdc
.value 0x85e,0x85e
.value 0x686,0x686
.value 0xae42,0xae42
.value 0xe779,0xe779
.value 0x2ac2,0x2ac2
.value 0xc5ca,0xc5ca
.value 0x5e97,0x5e97
.value 0xd4dc,0xd4dc
.value 0x425e,0x425e
.value 0x3886,0x3886
.value 0x860,0x860
.value 0x707,0x707
.value 0x803,0x803
.value 0x31a,0x31a
.value 0x71b,0x71b
.value 0x9ab,0x9ab
.value 0x99b,0x99b
.value 0x1de,0x1de
.value 0x2860,0x2860
.value 0xac07,0xac07
.value 0xe103,0xe103
.value 0xb11a,0xb11a
.value 0xa81b,0xa81b
.value 0x5aab,0x5aab
.value 0x2a9b,0x2a9b
.value 0xbbde,0xbbde
.value 0xc95,0xc95
.value 0xbcd,0xbcd
.value 0x3e4,0x3e4
.value 0x3df,0x3df
.value 0x3be,0x3be
.value 0x74d,0x74d
.value 0x5f2,0x5f2
.value 0x65c,0x65c
.value 0x7b95,0x7b95
.value 0xa2cd,0xa2cd
.value 0x6fe4,0x6fe4
.value 0xb0df,0xb0df
.value 0x5dbe,0x5dbe
.value 0x1e4d,0x1e4d
.value 0xbbf2,0xbbf2
.value 0x5a5c,0x5a5c
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
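# Twiddle factors for the base multiplication step. Reading the generated
# data (not authoritative): each pair of zetas is followed by its negation
# (e.g. 0xf74e == -0x8b2 mod 2^16), and the plain pairs reappear
# premultiplied by qinv = q^-1 mod 2^16 so that Montgomery reduction can
# use a single vpmullw against the table.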
L_kyber_avx2_zetas_basemul:
.value 0x8b2,0x81e
.value 0xf74e,0xf7e2
.value 0x1ae,0x367
.value 0xfe52,0xfc99
.value 0x22b,0x60e
.value 0xfdd5,0xf9f2
.value 0x34b,0x69
.value 0xfcb5,0xff97
.value 0xfeb2,0x821e
.value 0x14e,0x7de2
.value 0x2bae,0xc867
.value 0xd452,0x3799
.value 0xd32b,0x500e
.value 0x2cd5,0xaff2
.value 0x344b,0xab69
.value 0xcbb5,0x5497
.value 0x1a6,0xbde
.value 0xfe5a,0xf422
.value 0x24b,0xb35
.value 0xfdb5,0xf4cb
.value 0xb1,0x626
.value 0xff4f,0xf9da
.value 0xc16,0x675
.value 0xf3ea,0xf98b
.value 0x93a6,0xc5de
.value 0x6c5a,0x3a22
.value 0x334b,0x5a35
.value 0xccb5,0xa5cb
.value 0x3b1,0x1826
.value 0xfc4f,0xe7da
.value 0xee16,0x1575
.value 0x11ea,0xea8b
.value 0xc0b,0x9f8
.value 0xf3f5,0xf608
.value 0x30a,0x5cb
.value 0xfcf6,0xfa35
.value 0x487,0xaa7
.value 0xfb79,0xf559
.value 0xc6e,0x45f
.value 0xf392,0xfba1
.value 0x7d0b,0x71f8
.value 0x82f5,0x8e08
.value 0x810a,0xb6cb
.value 0x7ef6,0x4935
.value 0x2987,0x8fa7
.value 0xd679,0x7059
.value 0x766e,0x315f
.value 0x8992,0xcea1
.value 0x6cb,0x1a2
.value 0xf935,0xfe5e
.value 0x284,0x149
.value 0xfd7c,0xfeb7
.value 0x999,0xc65
.value 0xf667,0xf39b
.value 0x15d,0xcb6
.value 0xfea3,0xf34a
.value 0xb7cb,0xc7a2
.value 0x4835,0x385e
.value 0x4e84,0x4c49
.value 0xb17c,0xb3b7
.value 0x4499,0xeb65
.value 0xbb67,0x149b
.value 0x485d,0xceb6
.value 0xb7a3,0x314a
.value 0x331,0x52a
.value 0xfccf,0xfad6
.value 0x449,0x7fc
.value 0xfbb7,0xf804
.value 0x25b,0x748
.value 0xfda5,0xf8b8
.value 0x262,0x180
.value 0xfd9e,0xfe80
.value 0x8631,0xe32a
.value 0x79cf,0x1cd6
.value 0x4f49,0x3bfc
.value 0xb0b7,0xc404
.value 0x635b,0x5f48
.value 0x9ca5,0xa0b8
.value 0x862,0x8180
.value 0xf79e,0x7e80
.value 0x842,0x997
.value 0xf7be,0xf669
.value 0xc79,0xdc
.value 0xf387,0xff24
.value 0x4c2,0x85e
.value 0xfb3e,0xf7a2
.value 0x7ca,0x686
.value 0xf836,0xf97a
.value 0xae42,0x5e97
.value 0x51be,0xa169
.value 0xe779,0xd4dc
.value 0x1887,0x2b24
.value 0x2ac2,0x425e
.value 0xd53e,0xbda2
.value 0xc5ca,0x3886
.value 0x3a36,0xc77a
.value 0x860,0x71b
.value 0xf7a0,0xf8e5
.value 0x707,0x9ab
.value 0xf8f9,0xf655
.value 0x803,0x99b
.value 0xf7fd,0xf665
.value 0x31a,0x1de
.value 0xfce6,0xfe22
.value 0x2860,0xa81b
.value 0xd7a0,0x57e5
.value 0xac07,0x5aab
.value 0x53f9,0xa555
.value 0xe103,0x2a9b
.value 0x1efd,0xd565
.value 0xb11a,0xbbde
.value 0x4ee6,0x4422
.value 0xc95,0x3be
.value 0xf36b,0xfc42
.value 0xbcd,0x74d
.value 0xf433,0xf8b3
.value 0x3e4,0x5f2
.value 0xfc1c,0xfa0e
.value 0x3df,0x65c
.value 0xfc21,0xf9a4
.value 0x7b95,0x5dbe
.value 0x846b,0xa242
.value 0xa2cd,0x1e4d
.value 0x5d33,0xe1b3
.value 0x6fe4,0xbbf2
.value 0x901c,0x440e
.value 0xb0df,0x5a5c
.value 0x4f21,0xa5a4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
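# Zetas consumed by the inverse NTT, stored replicated across lanes in the
# same plain/premultiplied-by-qinv layout as the forward table.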
L_kyber_avx2_zetas_inv:
.value 0x6a5,0x6a5
.value 0x5b4,0x5b4
.value 0x70f,0x70f
.value 0x943,0x943
.value 0x922,0x922
.value 0x134,0x134
.value 0x91d,0x91d
.value 0x6c,0x6c
.value 0xa5a5,0xa5a5
.value 0xe1b4,0xe1b4
.value 0x440f,0x440f
.value 0xa243,0xa243
.value 0x4f22,0x4f22
.value 0x5d34,0x5d34
.value 0x901d,0x901d
.value 0x846c,0x846c
.value 0xb23,0xb23
.value 0x356,0x356
.value 0x366,0x366
.value 0x5e6,0x5e6
.value 0x9e7,0x9e7
.value 0x5fa,0x5fa
.value 0x4fe,0x4fe
.value 0x4a1,0x4a1
.value 0x4423,0x4423
.value 0xa556,0xa556
.value 0xd566,0xd566
.value 0x57e6,0x57e6
.value 0x4ee7,0x4ee7
.value 0x53fa,0x53fa
.value 0x1efe,0x1efe
.value 0xd7a1,0xd7a1
.value 0x4fb,0x4fb
.value 0x4fb,0x4fb
.value 0xa5c,0xa5c
.value 0xa5c,0xa5c
.value 0x429,0x429
.value 0x429,0x429
.value 0xb41,0xb41
.value 0xb41,0xb41
.value 0x45fb,0x45fb
.value 0x45fb,0x45fb
.value 0x5e5c,0x5e5c
.value 0x5e5c,0x5e5c
.value 0xef29,0xef29
.value 0xef29,0xef29
.value 0xbe41,0xbe41
.value 0xbe41,0xbe41
.value 0x2d5,0x2d5
.value 0x2d5,0x2d5
.value 0x5e4,0x5e4
.value 0x5e4,0x5e4
.value 0x940,0x940
.value 0x940,0x940
.value 0x18e,0x18e
.value 0x18e,0x18e
.value 0x31d5,0x31d5
.value 0x31d5,0x31d5
.value 0x71e4,0x71e4
.value 0x71e4,0x71e4
.value 0xc940,0xc940
.value 0xc940,0xc940
.value 0xcb8e,0xcb8e
.value 0xcb8e,0xcb8e
.value 0x623,0x623
.value 0x623,0x623
.value 0x623,0x623
.value 0x623,0x623
.value 0xcd,0xcd
.value 0xcd,0xcd
.value 0xcd,0xcd
.value 0xcd,0xcd
.value 0x3f23,0x3f23
.value 0x3f23,0x3f23
.value 0x3f23,0x3f23
.value 0x3f23,0x3f23
.value 0x97cd,0x97cd
.value 0x97cd,0x97cd
.value 0x97cd,0x97cd
.value 0x97cd,0x97cd
.value 0xb66,0xb66
.value 0xb66,0xb66
.value 0xb66,0xb66
.value 0xb66,0xb66
.value 0x606,0x606
.value 0x606,0x606
.value 0x606,0x606
.value 0x606,0x606
.value 0xdd66,0xdd66
.value 0xdd66,0xdd66
.value 0xdd66,0xdd66
.value 0xdd66,0xdd66
.value 0xb806,0xb806
.value 0xb806,0xb806
.value 0xb806,0xb806
.value 0xb806,0xb806
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x745,0x745
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x8645,0x8645
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x5c2,0x5c2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0x2bc2,0x2bc2
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0xc37,0xc37
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x4137,0x4137
.value 0x67b,0x67b
.value 0xc25,0xc25
.value 0x4a3,0x4a3
.value 0x36a,0x36a
.value 0x537,0x537
.value 0x88,0x88
.value 0x83f,0x83f
.value 0x4bf,0x4bf
.value 0xc77b,0xc77b
.value 0x2b25,0x2b25
.value 0xbda3,0xbda3
.value 0xa16a,0xa16a
.value 0x3a37,0x3a37
.value 0x1888,0x1888
.value 0xd53f,0xd53f
.value 0x51bf,0x51bf
.value 0xb81,0xb81
.value 0x505,0x505
.value 0x5b9,0x5b9
.value 0x7d7,0x7d7
.value 0xa9f,0xa9f
.value 0x8b8,0x8b8
.value 0xaa6,0xaa6
.value 0x9d0,0x9d0
.value 0x7e81,0x7e81
.value 0xc405,0xc405
.value 0xa0b9,0xa0b9
.value 0x1cd7,0x1cd7
.value 0xf79f,0xf79f
.value 0xb0b8,0xb0b8
.value 0x9ca6,0x9ca6
.value 0x79d0,0x79d0
.value 0x3b7,0x3b7
.value 0x3b7,0x3b7
.value 0xf7,0xf7
.value 0xf7,0xf7
.value 0x58d,0x58d
.value 0x58d,0x58d
.value 0xc96,0xc96
.value 0xc96,0xc96
.value 0xb8b7,0xb8b7
.value 0xb8b7,0xb8b7
.value 0x75f7,0x75f7
.value 0x75f7,0x75f7
.value 0xdc8d,0xdc8d
.value 0xdc8d,0xdc8d
.value 0x6e96,0x6e96
.value 0x6e96,0x6e96
.value 0x9c3,0x9c3
.value 0x9c3,0x9c3
.value 0x10f,0x10f
.value 0x10f,0x10f
.value 0x5a,0x5a
.value 0x5a,0x5a
.value 0x355,0x355
.value 0x355,0x355
.value 0x22c3,0x22c3
.value 0x22c3,0x22c3
.value 0x3e0f,0x3e0f
.value 0x3e0f,0x3e0f
.value 0x6e5a,0x6e5a
.value 0x6e5a,0x6e5a
.value 0xb255,0xb255
.value 0xb255,0xb255
.value 0xaa1,0xaa1
.value 0xaa1,0xaa1
.value 0xaa1,0xaa1
.value 0xaa1,0xaa1
.value 0xa25,0xa25
.value 0xa25,0xa25
.value 0xa25,0xa25
.value 0xa25,0xa25
.value 0xdda1,0xdda1
.value 0xdda1,0xdda1
.value 0xdda1,0xdda1
.value 0xdda1,0xdda1
.value 0x2925,0x2925
.value 0x2925,0x2925
.value 0x2925,0x2925
.value 0x2925,0x2925
.value 0x908,0x908
.value 0x908,0x908
.value 0x908,0x908
.value 0x908,0x908
.value 0x2a9,0x2a9
.value 0x2a9,0x2a9
.value 0x2a9,0x2a9
.value 0x2a9,0x2a9
.value 0xa108,0xa108
.value 0xa108,0xa108
.value 0xa108,0xa108
.value 0xa108,0xa108
.value 0x6da9,0x6da9
.value 0x6da9,0x6da9
.value 0x6da9,0x6da9
.value 0x6da9,0x6da9
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0x4b2,0x4b2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0xfab2,0xfab2
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0x93f,0x93f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xd63f,0xd63f
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0xbe2,0xbe2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x91e2,0x91e2
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0x5ed,0x5ed
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0xfced,0xfced
.value 0x4b,0x4b
.value 0xbb8,0xbb8
.value 0x9c,0x9c
.value 0xb5f,0xb5f
.value 0xba4,0xba4
.value 0xa7d,0xa7d
.value 0x368,0x368
.value 0x636,0x636
.value 0x314b,0x314b
.value 0xb3b8,0xb3b8
.value 0x149c,0x149c
.value 0x385f,0x385f
.value 0xb7a4,0xb7a4
.value 0xb17d,0xb17d
.value 0xbb68,0xbb68
.value 0x4836,0x4836
.value 0x8a2,0x8a2
.value 0x736,0x736
.value 0x25a,0x25a
.value 0x309,0x309
.value 0x93,0x93
.value 0x9f7,0x9f7
.value 0x87a,0x87a
.value 0xf6,0xf6
.value 0xcea2,0xcea2
.value 0x4936,0x4936
.value 0x705a,0x705a
.value 0x8e09,0x8e09
.value 0x8993,0x8993
.value 0x7ef7,0x7ef7
.value 0xd67a,0xd67a
.value 0x82f6,0x82f6
.value 0x744,0x744
.value 0x744,0x744
.value 0xc83,0xc83
.value 0xc83,0xc83
.value 0x48a,0x48a
.value 0x48a,0x48a
.value 0x652,0x652
.value 0x652,0x652
.value 0x9344,0x9344
.value 0x9344,0x9344
.value 0x6583,0x6583
.value 0x6583,0x6583
.value 0x28a,0x28a
.value 0x28a,0x28a
.value 0xdc52,0xdc52
.value 0xdc52,0xdc52
.value 0x29a,0x29a
.value 0x29a,0x29a
.value 0x140,0x140
.value 0x140,0x140
.value 0x8,0x8
.value 0x8,0x8
.value 0xafd,0xafd
.value 0xafd,0xafd
.value 0x309a,0x309a
.value 0x309a,0x309a
.value 0xc140,0xc140
.value 0xc140,0xc140
.value 0x9808,0x9808
.value 0x9808,0x9808
.value 0x31fd,0x31fd
.value 0x31fd,0x31fd
.value 0x82,0x82
.value 0x82,0x82
.value 0x82,0x82
.value 0x82,0x82
.value 0x642,0x642
.value 0x642,0x642
.value 0x642,0x642
.value 0x642,0x642
.value 0x6682,0x6682
.value 0x6682,0x6682
.value 0x6682,0x6682
.value 0x6682,0x6682
.value 0xac42,0xac42
.value 0xac42,0xac42
.value 0xac42,0xac42
.value 0xac42,0xac42
.value 0x74f,0x74f
.value 0x74f,0x74f
.value 0x74f,0x74f
.value 0x74f,0x74f
.value 0x33d,0x33d
.value 0x33d,0x33d
.value 0x33d,0x33d
.value 0x33d,0x33d
.value 0x44f,0x44f
.value 0x44f,0x44f
.value 0x44f,0x44f
.value 0x44f,0x44f
.value 0xea3d,0xea3d
.value 0xea3d,0xea3d
.value 0xea3d,0xea3d
.value 0xea3d,0xea3d
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0xc4b,0xc4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x3d4b,0x3d4b
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0x6d8,0x6d8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0xed8,0xed8
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x773,0x773
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x3073,0x3073
.value 0x68c,0x68c
.value 0x1cc,0x1cc
.value 0x6db,0x6db
.value 0x123,0x123
.value 0xeb,0xeb
.value 0xab6,0xab6
.value 0xc50,0xc50
.value 0xb5b,0xb5b
.value 0xea8c,0xea8c
.value 0xa5cc,0xa5cc
.value 0xe7db,0xe7db
.value 0x3a23,0x3a23
.value 0x11eb,0x11eb
.value 0xccb6,0xccb6
.value 0xfc50,0xfc50
.value 0x6c5b,0x6c5b
.value 0xc98,0xc98
.value 0x99a,0x99a
.value 0x6f3,0x6f3
.value 0x4e3,0x4e3
.value 0x9b6,0x9b6
.value 0xb53,0xb53
.value 0xad6,0xad6
.value 0x44f,0x44f
.value 0x5498,0x5498
.value 0x379a,0x379a
.value 0xaff3,0xaff3
.value 0x7de3,0x7de3
.value 0xcbb6,0xcbb6
.value 0xd453,0xd453
.value 0x2cd6,0x2cd6
.value 0x14f,0x14f
.value 0x608,0x608
.value 0x608,0x608
.value 0x11a,0x11a
.value 0x11a,0x11a
.value 0x72e,0x72e
.value 0x72e,0x72e
.value 0x50d,0x50d
.value 0x50d,0x50d
.value 0x9e08,0x9e08
.value 0x9e08,0x9e08
.value 0xaf1a,0xaf1a
.value 0xaf1a,0xaf1a
.value 0xb12e,0xb12e
.value 0xb12e,0xb12e
.value 0x5c0d,0x5c0d
.value 0x5c0d,0x5c0d
.value 0x90a,0x90a
.value 0x90a,0x90a
.value 0x228,0x228
.value 0x228,0x228
.value 0xa75,0xa75
.value 0xa75,0xa75
.value 0x83a,0x83a
.value 0x83a,0x83a
.value 0x870a,0x870a
.value 0x870a,0x870a
.value 0xfa28,0xfa28
.value 0xfa28,0xfa28
.value 0x1975,0x1975
.value 0x1975,0x1975
.value 0x163a,0x163a
.value 0x163a,0x163a
.value 0xb82,0xb82
.value 0xb82,0xb82
.value 0xb82,0xb82
.value 0xb82,0xb82
.value 0xbf9,0xbf9
.value 0xbf9,0xbf9
.value 0xbf9,0xbf9
.value 0xbf9,0xbf9
.value 0x7182,0x7182
.value 0x7182,0x7182
.value 0x7182,0x7182
.value 0x7182,0x7182
.value 0x66f9,0x66f9
.value 0x66f9,0x66f9
.value 0x66f9,0x66f9
.value 0x66f9,0x66f9
.value 0x52d,0x52d
.value 0x52d,0x52d
.value 0x52d,0x52d
.value 0x52d,0x52d
.value 0xac4,0xac4
.value 0xac4,0xac4
.value 0xac4,0xac4
.value 0xac4,0xac4
.value 0xbc2d,0xbc2d
.value 0xbc2d,0xbc2d
.value 0xbc2d,0xbc2d
.value 0xbc2d,0xbc2d
.value 0x16c4,0x16c4
.value 0x16c4,0x16c4
.value 0x16c4,0x16c4
.value 0x16c4,0x16c4
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0xa93,0xa93
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0x9393,0x9393
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0xab,0xab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x51ab,0x51ab
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0x72c,0x72c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0xcb2c,0xcb2c
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0x167,0x167
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0xc667,0xc667
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x2f6,0x2f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x84f6,0x84f6
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0x5a1,0x5a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
.value 0xd8a1,0xd8a1
#ifndef __APPLE__
.text
.globl kyber_keygen_avx2
.type kyber_keygen_avx2,@function
.align 16
kyber_keygen_avx2:
#else
.section __TEXT,__text
.globl _kyber_keygen_avx2
.p2align 4
_kyber_keygen_avx2:
#endif /* __APPLE__ */
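# Register use below (inferred from the code, a reading rather than a
# spec): %r8 carries the vector dimension k and seeds the loop counters;
# %r10 (copied from %rdi) walks the polynomials transformed in place;
# ymm14 is loaded with kyber_q (q = 3329 replicated across 16-bit lanes)
# and ymm15 with kyber_v, the Barrett constant used after each transform.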
vmovdqu kyber_q(%rip), %ymm14
vmovdqu kyber_v(%rip), %ymm15
movq %r8, %r9
movq %rdi, %r10
L_kyber_keygen_avx2_priv:
# ntt - in-place forward number-theoretic transform of one 256-coefficient polynomial
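# Layers run with decreasing butterfly distance (128, 64, 32, 16, 8, 4,
# 2); the first block pairs coefficient i with i+128, and every zeta
# multiplication is done in Montgomery form against L_kyber_avx2_zetas.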
leaq L_kyber_avx2_zetas(%rip), %r11
vmovdqu (%r11), %ymm10
vmovdqu 32(%r11), %ymm12
vmovdqu 128(%r10), %ymm0
vmovdqu 160(%r10), %ymm1
vmovdqu 192(%r10), %ymm2
vmovdqu 224(%r10), %ymm3
vmovdqu 384(%r10), %ymm4
vmovdqu 416(%r10), %ymm5
vmovdqu 448(%r10), %ymm6
vmovdqu 480(%r10), %ymm7
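# Each butterfly computes, per signed 16-bit lane (C-like sketch; the
# names a, b, zeta, zeta_qinv are illustrative only):
#   t  = mulhi(zeta, b) - mulhi(q, mullo(zeta_qinv, b))   # mont_mul(zeta, b)
#   b' = a - t
#   a' = a + t
# ymm10 holds the replicated zetas and ymm12 the same zetas premultiplied
# by q^-1 mod 2^16, so the vpmullw/vpmulhw/vpmulhw/vpsubw sequence below
# is a lane-wise Montgomery multiplication.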
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm0, 128(%r10)
vmovdqu %ymm1, 160(%r10)
vmovdqu %ymm2, 192(%r10)
vmovdqu %ymm3, 224(%r10)
vmovdqu %ymm4, 384(%r10)
vmovdqu %ymm5, 416(%r10)
vmovdqu %ymm6, 448(%r10)
vmovdqu %ymm7, 480(%r10)
vmovdqu (%r10), %ymm0
vmovdqu 32(%r10), %ymm1
vmovdqu 64(%r10), %ymm2
vmovdqu 96(%r10), %ymm3
vmovdqu 256(%r10), %ymm4
vmovdqu 288(%r10), %ymm5
vmovdqu 320(%r10), %ymm6
vmovdqu 352(%r10), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm4, 256(%r10)
vmovdqu %ymm5, 288(%r10)
vmovdqu %ymm6, 320(%r10)
vmovdqu %ymm7, 352(%r10)
vmovdqu 128(%r10), %ymm4
vmovdqu 160(%r10), %ymm5
vmovdqu 192(%r10), %ymm6
vmovdqu 224(%r10), %ymm7
# 64: 0/3
vmovdqu 64(%r11), %ymm10
vmovdqu 96(%r11), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 0/3
vmovdqu 128(%r11), %ymm10
vmovdqu 160(%r11), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 0/3
vmovdqu 192(%r11), %ymm10
vmovdqu 224(%r11), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 0/3
vmovdqu 256(%r11), %ymm10
vmovdqu 288(%r11), %ymm12
vmovdqu 320(%r11), %ymm11
vmovdqu 352(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 0/3
vmovdqu 384(%r11), %ymm10
vmovdqu 416(%r11), %ymm12
vmovdqu 448(%r11), %ymm11
vmovdqu 480(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 0/3
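# From distance 8 downward the butterfly partners sit inside one 256-bit
# register, so vperm2i128/vpunpck*/vpblendd shuffles are woven between the
# arithmetic to line the pairs up in matching lanes.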
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 512(%r11), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 544(%r11), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 576(%r11), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 608(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 0/3
vmovdqu 640(%r11), %ymm10
vmovdqu 672(%r11), %ymm12
vmovdqu 704(%r11), %ymm11
vmovdqu 736(%r11), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 0/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 768(%r11), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 800(%r11), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 832(%r11), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 864(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 0/3
vmovdqu 896(%r11), %ymm10
vmovdqu 928(%r11), %ymm12
vmovdqu 960(%r11), %ymm11
vmovdqu 992(%r11), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 0/3
vmovdqu 1024(%r11), %ymm10
vmovdqu 1056(%r11), %ymm12
vmovdqu 1088(%r11), %ymm11
vmovdqu 1120(%r11), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 0/3
vmovdqu 1152(%r11), %ymm10
vmovdqu 1184(%r11), %ymm12
vmovdqu 1216(%r11), %ymm11
vmovdqu 1248(%r11), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
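# Barrett reduction of every coefficient before the store-back (sketch;
# v comes from kyber_v, approximately 2^26/q):
#   t  = (mulhi(v, a) >> 10) * q    # floor(a*v / 2^26) * q
#   a' = a - t
# leaving each lane in a small representative range mod q.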
vpmulhw %ymm15, %ymm0, %ymm8
vpmulhw %ymm15, %ymm1, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm8
vpsubw %ymm9, %ymm1, %ymm9
vmovdqu %ymm8, (%r10)
vmovdqu %ymm9, 32(%r10)
vpmulhw %ymm15, %ymm2, %ymm8
vpmulhw %ymm15, %ymm3, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vmovdqu %ymm8, 64(%r10)
vmovdqu %ymm9, 96(%r10)
vpmulhw %ymm15, %ymm4, %ymm8
vpmulhw %ymm15, %ymm5, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vmovdqu %ymm8, 128(%r10)
vmovdqu %ymm9, 160(%r10)
vpmulhw %ymm15, %ymm6, %ymm8
vpmulhw %ymm15, %ymm7, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vmovdqu %ymm8, 192(%r10)
vmovdqu %ymm9, 224(%r10)
vmovdqu 256(%r10), %ymm0
vmovdqu 288(%r10), %ymm1
vmovdqu 320(%r10), %ymm2
vmovdqu 352(%r10), %ymm3
vmovdqu 384(%r10), %ymm4
vmovdqu 416(%r10), %ymm5
vmovdqu 448(%r10), %ymm6
vmovdqu 480(%r10), %ymm7
# 64: 1/3
vmovdqu 1280(%r11), %ymm10
vmovdqu 1312(%r11), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 1/3
vmovdqu 1344(%r11), %ymm10
vmovdqu 1376(%r11), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 1/3
vmovdqu 1408(%r11), %ymm10
vmovdqu 1440(%r11), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 1/3
vmovdqu 1472(%r11), %ymm10
vmovdqu 1504(%r11), %ymm12
vmovdqu 1536(%r11), %ymm11
vmovdqu 1568(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 1/3
vmovdqu 1600(%r11), %ymm10
vmovdqu 1632(%r11), %ymm12
vmovdqu 1664(%r11), %ymm11
vmovdqu 1696(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 1/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1728(%r11), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 1760(%r11), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 1792(%r11), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 1824(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 1/3
vmovdqu 1856(%r11), %ymm10
vmovdqu 1888(%r11), %ymm12
vmovdqu 1920(%r11), %ymm11
vmovdqu 1952(%r11), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 1/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1984(%r11), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 2016(%r11), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 2048(%r11), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 2080(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 1/3
vmovdqu 2112(%r11), %ymm10
vmovdqu 2144(%r11), %ymm12
vmovdqu 2176(%r11), %ymm11
vmovdqu 2208(%r11), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 1/3
vmovdqu 2240(%r11), %ymm10
vmovdqu 2272(%r11), %ymm12
vmovdqu 2304(%r11), %ymm11
vmovdqu 2336(%r11), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 1/3
vmovdqu 2368(%r11), %ymm10
vmovdqu 2400(%r11), %ymm12
vmovdqu 2432(%r11), %ymm11
vmovdqu 2464(%r11), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vpmulhw %ymm15, %ymm0, %ymm8
vpmulhw %ymm15, %ymm1, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm8
vpsubw %ymm9, %ymm1, %ymm9
vmovdqu %ymm8, 256(%r10)
vmovdqu %ymm9, 288(%r10)
vpmulhw %ymm15, %ymm2, %ymm8
vpmulhw %ymm15, %ymm3, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vmovdqu %ymm8, 320(%r10)
vmovdqu %ymm9, 352(%r10)
vpmulhw %ymm15, %ymm4, %ymm8
vpmulhw %ymm15, %ymm5, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vmovdqu %ymm8, 384(%r10)
vmovdqu %ymm9, 416(%r10)
vpmulhw %ymm15, %ymm6, %ymm8
vpmulhw %ymm15, %ymm7, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vmovdqu %ymm8, 448(%r10)
vmovdqu %ymm9, 480(%r10)
addq $0x200, %r10
subq $0x01, %r9
jg L_kyber_keygen_avx2_priv
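# All polynomials are now in the NTT domain. The accumulation phase below
# builds each output polynomial as the sum of %r8 base products; ymm13 is
# loaded with kyber_qinv (q^-1 mod 2^16) for the Montgomery reductions.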
vmovdqu kyber_qinv(%rip), %ymm13
movq %r8, %rax
movq %rsi, %r10
L_kyber_keygen_avx2_acc:
# Pointwise acc mont - multiply-accumulate %r8 polynomial products in the NTT domain
movq %r8, %r9
# Base mul mont
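# In the NTT domain a product splits into 128 independent multiplications
# in Z_q[X]/(X^2 - zeta_i):
#   (a0 + a1*X)*(b0 + b1*X) = (a0*b0 + zeta*a1*b1) + (a0*b1 + a1*b0)*X
# The vpslld/vpsrld/vpblendw shuffles first gather the even-index
# coefficients of the operands into ymm2/ymm4 and the odd-index ones into
# ymm3/ymm5; the zeta factors come from L_kyber_avx2_zetas_basemul.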
leaq L_kyber_avx2_zetas_basemul(%rip), %r11
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdi), %ymm4
vmovdqu 32(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r11), %ymm10
vmovdqu 32(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
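# Signed Montgomery reduction of 32-bit products kept as (lo, hi) halves
# (sketch): t = hi - mulhi(q, mullo(qinv, lo)), a 16-bit value congruent
# to product * 2^-16 mod q.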
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, (%r10)
vmovdqu %ymm1, 32(%r10)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdi), %ymm4
vmovdqu 96(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r11), %ymm10
vmovdqu 96(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 64(%r10)
vmovdqu %ymm1, 96(%r10)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r11), %ymm10
vmovdqu 160(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 128(%r10)
vmovdqu %ymm1, 160(%r10)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdi), %ymm4
vmovdqu 224(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r11), %ymm10
vmovdqu 224(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 192(%r10)
vmovdqu %ymm1, 224(%r10)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdi), %ymm4
vmovdqu 288(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r11), %ymm10
vmovdqu 288(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 256(%r10)
vmovdqu %ymm1, 288(%r10)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdi), %ymm4
vmovdqu 352(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r11), %ymm10
vmovdqu 352(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 320(%r10)
vmovdqu %ymm1, 352(%r10)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r11), %ymm10
vmovdqu 416(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 384(%r10)
vmovdqu %ymm1, 416(%r10)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdi), %ymm4
vmovdqu 480(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r11), %ymm10
vmovdqu 480(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 448(%r10)
vmovdqu %ymm1, 480(%r10)
addq $0x200, %rcx
addq $0x200, %rdi
subq $2, %r9
jz L_pointwise_acc_mont_end_keygen
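# Accumulation structure: the product above was stored directly; the loop
# below adds the middle products into (%r10); the block after the loop
# folds in the last product and re-interleaves the result.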
L_pointwise_acc_mont_start_keygen:
# Base mul mont add
leaq L_kyber_avx2_zetas_basemul(%rip), %r11
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdi), %ymm4
vmovdqu 32(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r11), %ymm10
vmovdqu 32(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%r10), %ymm6
vmovdqu 32(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%r10)
vmovdqu %ymm1, 32(%r10)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdi), %ymm4
vmovdqu 96(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r11), %ymm10
vmovdqu 96(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%r10), %ymm6
vmovdqu 96(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%r10)
vmovdqu %ymm1, 96(%r10)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r11), %ymm10
vmovdqu 160(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%r10), %ymm6
vmovdqu 160(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%r10)
vmovdqu %ymm1, 160(%r10)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdi), %ymm4
vmovdqu 224(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r11), %ymm10
vmovdqu 224(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%r10), %ymm6
vmovdqu 224(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%r10)
vmovdqu %ymm1, 224(%r10)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdi), %ymm4
vmovdqu 288(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r11), %ymm10
vmovdqu 288(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%r10), %ymm6
vmovdqu 288(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%r10)
vmovdqu %ymm1, 288(%r10)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdi), %ymm4
vmovdqu 352(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r11), %ymm10
vmovdqu 352(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%r10), %ymm6
vmovdqu 352(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%r10)
vmovdqu %ymm1, 352(%r10)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r11), %ymm10
vmovdqu 416(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%r10), %ymm6
vmovdqu 416(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%r10)
vmovdqu %ymm1, 416(%r10)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdi), %ymm4
vmovdqu 480(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r11), %ymm10
vmovdqu 480(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%r10), %ymm6
vmovdqu 480(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%r10)
vmovdqu %ymm1, 480(%r10)
addq $0x200, %rcx
addq $0x200, %rdi
subq $0x01, %r9
jg L_pointwise_acc_mont_start_keygen
L_pointwise_acc_mont_end_keygen:
# Base mul mont add
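# Multiply coefficient pairs (a0 + a1*X) * (b0 + b1*X) mod (X^2 - zeta)
# in the NTT domain. The vpslld/vpsrld/vpblendw shuffles deinterleave the
# even and odd coefficients, the zetas (and their QINV multiples) stream
# from L_kyber_avx2_zetas_basemul, and the products are accumulated into
# the output at (%r10), which is re-interleaved before each store.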
leaq L_kyber_avx2_zetas_basemul(%rip), %r11
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdi), %ymm4
vmovdqu 32(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r11), %ymm10
vmovdqu 32(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%r10), %ymm6
vmovdqu 32(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%r10)
vmovdqu %ymm1, 32(%r10)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdi), %ymm4
vmovdqu 96(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r11), %ymm10
vmovdqu 96(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%r10), %ymm6
vmovdqu 96(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%r10)
vmovdqu %ymm1, 96(%r10)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r11), %ymm10
vmovdqu 160(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%r10), %ymm6
vmovdqu 160(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%r10)
vmovdqu %ymm1, 160(%r10)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdi), %ymm4
vmovdqu 224(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r11), %ymm10
vmovdqu 224(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%r10), %ymm6
vmovdqu 224(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%r10)
vmovdqu %ymm1, 224(%r10)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdi), %ymm4
vmovdqu 288(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r11), %ymm10
vmovdqu 288(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%r10), %ymm6
vmovdqu 288(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%r10)
vmovdqu %ymm1, 288(%r10)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdi), %ymm4
vmovdqu 352(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r11), %ymm10
vmovdqu 352(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%r10), %ymm6
vmovdqu 352(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%r10)
vmovdqu %ymm1, 352(%r10)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r11), %ymm10
vmovdqu 416(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%r10), %ymm6
vmovdqu 416(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%r10)
vmovdqu %ymm1, 416(%r10)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdi), %ymm4
vmovdqu 480(%rdi), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r11), %ymm10
vmovdqu 480(%r11), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm13, %ymm1, %ymm8
vpmullw %ymm13, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%r10), %ymm6
vmovdqu 480(%r10), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%r10)
vmovdqu %ymm1, 480(%r10)
addq $0x200, %rcx
addq $0x200, %rdi
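# One full matrix row has been consumed: rewind %rdi by k * 512 bytes
# (k appears to be in %r8) so the next row multiplies against the start
# of the vector again, then step the output pointer %r10 one polynomial.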
movq %r8, %r9
shl $9, %r9d
subq %r9, %rdi
addq $0x200, %r10
subq $0x01, %rax
jg L_kyber_keygen_avx2_acc
movq %r8, %rax
vmovdqu kyber_f(%rip), %ymm12
vmovdqu kyber_f_qinv(%rip), %ymm13
movq %rsi, %r10
L_kyber_keygen_avx2_to_mont:
# To Mont
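# Montgomery-multiply every coefficient by f (kyber_f in ymm12, with
# f*QINV as kyber_f_qinv in ymm13); f is presumably 2^32 mod q, so this
# converts each coefficient into the Montgomery domain: a -> a*2^16 mod q.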
vmovdqu (%r10), %ymm0
vmovdqu 32(%r10), %ymm1
vmovdqu 64(%r10), %ymm2
vmovdqu 96(%r10), %ymm3
vpmullw %ymm13, %ymm0, %ymm4
vpmulhw %ymm12, %ymm0, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm0
vpmullw %ymm13, %ymm1, %ymm4
vpmulhw %ymm12, %ymm1, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm1
vpmullw %ymm13, %ymm2, %ymm4
vpmulhw %ymm12, %ymm2, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm2
vpmullw %ymm13, %ymm3, %ymm4
vpmulhw %ymm12, %ymm3, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm3
vmovdqu %ymm0, (%r10)
vmovdqu %ymm1, 32(%r10)
vmovdqu %ymm2, 64(%r10)
vmovdqu %ymm3, 96(%r10)
vmovdqu 128(%r10), %ymm0
vmovdqu 160(%r10), %ymm1
vmovdqu 192(%r10), %ymm2
vmovdqu 224(%r10), %ymm3
vpmullw %ymm13, %ymm0, %ymm4
vpmulhw %ymm12, %ymm0, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm0
vpmullw %ymm13, %ymm1, %ymm4
vpmulhw %ymm12, %ymm1, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm1
vpmullw %ymm13, %ymm2, %ymm4
vpmulhw %ymm12, %ymm2, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm2
vpmullw %ymm13, %ymm3, %ymm4
vpmulhw %ymm12, %ymm3, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm3
vmovdqu %ymm0, 128(%r10)
vmovdqu %ymm1, 160(%r10)
vmovdqu %ymm2, 192(%r10)
vmovdqu %ymm3, 224(%r10)
vmovdqu 256(%r10), %ymm0
vmovdqu 288(%r10), %ymm1
vmovdqu 320(%r10), %ymm2
vmovdqu 352(%r10), %ymm3
vpmullw %ymm13, %ymm0, %ymm4
vpmulhw %ymm12, %ymm0, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm0
vpmullw %ymm13, %ymm1, %ymm4
vpmulhw %ymm12, %ymm1, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm1
vpmullw %ymm13, %ymm2, %ymm4
vpmulhw %ymm12, %ymm2, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm2
vpmullw %ymm13, %ymm3, %ymm4
vpmulhw %ymm12, %ymm3, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm3
vmovdqu %ymm0, 256(%r10)
vmovdqu %ymm1, 288(%r10)
vmovdqu %ymm2, 320(%r10)
vmovdqu %ymm3, 352(%r10)
vmovdqu 384(%r10), %ymm0
vmovdqu 416(%r10), %ymm1
vmovdqu 448(%r10), %ymm2
vmovdqu 480(%r10), %ymm3
vpmullw %ymm13, %ymm0, %ymm4
vpmulhw %ymm12, %ymm0, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm0
vpmullw %ymm13, %ymm1, %ymm4
vpmulhw %ymm12, %ymm1, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm1
vpmullw %ymm13, %ymm2, %ymm4
vpmulhw %ymm12, %ymm2, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm2
vpmullw %ymm13, %ymm3, %ymm4
vpmulhw %ymm12, %ymm3, %ymm5
vpmulhw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm5, %ymm3
vmovdqu %ymm0, 384(%r10)
vmovdqu %ymm1, 416(%r10)
vmovdqu %ymm2, 448(%r10)
vmovdqu %ymm3, 480(%r10)
addq $0x200, %r10
subq $0x01, %rax
jg L_kyber_keygen_avx2_to_mont
movq %r8, %rax
L_kyber_keygen_avx2_to_mont_ntt_err:
# ntt
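# Forward NTT of one 256-coefficient polynomial. Each stage applies
# Cooley-Tukey butterflies: t = mont_mul(zeta, b); b' = a - t; a' = a + t,
# with zetas (and their QINV multiples) streamed from L_kyber_avx2_zetas.
# The `# 64: 0/3` style markers appear to tag the butterfly distance and
# a pass index.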
leaq L_kyber_avx2_zetas(%rip), %r11
vmovdqu (%r11), %ymm10
vmovdqu 32(%r11), %ymm12
vmovdqu 128(%rdx), %ymm0
vmovdqu 160(%rdx), %ymm1
vmovdqu 192(%rdx), %ymm2
vmovdqu 224(%rdx), %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu %ymm2, 192(%rdx)
vmovdqu %ymm3, 224(%rdx)
vmovdqu %ymm4, 384(%rdx)
vmovdqu %ymm5, 416(%rdx)
vmovdqu %ymm6, 448(%rdx)
vmovdqu %ymm7, 480(%rdx)
vmovdqu (%rdx), %ymm0
vmovdqu 32(%rdx), %ymm1
vmovdqu 64(%rdx), %ymm2
vmovdqu 96(%rdx), %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vmovdqu 320(%rdx), %ymm6
vmovdqu 352(%rdx), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm4, 256(%rdx)
vmovdqu %ymm5, 288(%rdx)
vmovdqu %ymm6, 320(%rdx)
vmovdqu %ymm7, 352(%rdx)
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vmovdqu 192(%rdx), %ymm6
vmovdqu 224(%rdx), %ymm7
# 64: 0/3
vmovdqu 64(%r11), %ymm10
vmovdqu 96(%r11), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 0/3
vmovdqu 128(%r11), %ymm10
vmovdqu 160(%r11), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 0/3
vmovdqu 192(%r11), %ymm10
vmovdqu 224(%r11), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 0/3
vmovdqu 256(%r11), %ymm10
vmovdqu 288(%r11), %ymm12
vmovdqu 320(%r11), %ymm11
vmovdqu 352(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 0/3
vmovdqu 384(%r11), %ymm10
vmovdqu 416(%r11), %ymm12
vmovdqu 448(%r11), %ymm11
vmovdqu 480(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
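# From here the butterfly distance is no larger than a 128-bit lane, so
# vperm2i128/vpunpck*/vpblendd shuffles regroup coefficients across and
# within lanes before each multiply.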
# 8: 0/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 512(%r11), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 544(%r11), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 576(%r11), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 608(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 0/3
vmovdqu 640(%r11), %ymm10
vmovdqu 672(%r11), %ymm12
vmovdqu 704(%r11), %ymm11
vmovdqu 736(%r11), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 0/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 768(%r11), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 800(%r11), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 832(%r11), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 864(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 0/3
vmovdqu 896(%r11), %ymm10
vmovdqu 928(%r11), %ymm12
vmovdqu 960(%r11), %ymm11
vmovdqu 992(%r11), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 0/3
vmovdqu 1024(%r11), %ymm10
vmovdqu 1056(%r11), %ymm12
vmovdqu 1088(%r11), %ymm11
vmovdqu 1120(%r11), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 0/3
vmovdqu 1152(%r11), %ymm10
vmovdqu 1184(%r11), %ymm12
vmovdqu 1216(%r11), %ymm11
vmovdqu 1248(%r11), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
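# Undo the lane shuffles: the vpunpck{l,h}dq/vperm2i128 sequence below
# restores the coefficients to their in-memory order before storing.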
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu %ymm2, 64(%rdx)
vmovdqu %ymm3, 96(%rdx)
vmovdqu %ymm4, 128(%rdx)
vmovdqu %ymm5, 160(%rdx)
vmovdqu %ymm6, 192(%rdx)
vmovdqu %ymm7, 224(%rdx)
vmovdqu 256(%rdx), %ymm0
vmovdqu 288(%rdx), %ymm1
vmovdqu 320(%rdx), %ymm2
vmovdqu 352(%rdx), %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
# 64: 1/3
vmovdqu 1280(%r11), %ymm10
vmovdqu 1312(%r11), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 1/3
vmovdqu 1344(%r11), %ymm10
vmovdqu 1376(%r11), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 1/3
vmovdqu 1408(%r11), %ymm10
vmovdqu 1440(%r11), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 1/3
vmovdqu 1472(%r11), %ymm10
vmovdqu 1504(%r11), %ymm12
vmovdqu 1536(%r11), %ymm11
vmovdqu 1568(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 1/3
vmovdqu 1600(%r11), %ymm10
vmovdqu 1632(%r11), %ymm12
vmovdqu 1664(%r11), %ymm11
vmovdqu 1696(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 1/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1728(%r11), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 1760(%r11), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 1792(%r11), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 1824(%r11), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 1/3
vmovdqu 1856(%r11), %ymm10
vmovdqu 1888(%r11), %ymm12
vmovdqu 1920(%r11), %ymm11
vmovdqu 1952(%r11), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 1/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1984(%r11), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 2016(%r11), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 2048(%r11), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 2080(%r11), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 1/3
vmovdqu 2112(%r11), %ymm10
vmovdqu 2144(%r11), %ymm12
vmovdqu 2176(%r11), %ymm11
vmovdqu 2208(%r11), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 1/3
vmovdqu 2240(%r11), %ymm10
vmovdqu 2272(%r11), %ymm12
vmovdqu 2304(%r11), %ymm11
vmovdqu 2336(%r11), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 1/3
vmovdqu 2368(%r11), %ymm10
vmovdqu 2400(%r11), %ymm12
vmovdqu 2432(%r11), %ymm11
vmovdqu 2464(%r11), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu %ymm2, 320(%rdx)
vmovdqu %ymm3, 352(%rdx)
vmovdqu %ymm4, 384(%rdx)
vmovdqu %ymm5, 416(%rdx)
vmovdqu %ymm6, 448(%rdx)
vmovdqu %ymm7, 480(%rdx)
# Add Errors
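# Add the polynomial at (%rdx) into the one at (%rsi), then Barrett-reduce
# each coefficient: vpmulhw with the constant in ymm15 (presumably loaded
# from kyber_v) followed by an arithmetic shift of 10 estimates the
# quotient, which is multiplied by q (ymm14) and subtracted, bringing each
# 16-bit sum back into range mod q = 3329.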
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu (%rdx), %ymm4
vmovdqu 32(%rdx), %ymm5
vmovdqu 64(%rdx), %ymm6
vmovdqu 96(%rdx), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vmovdqu 192(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vmovdqu 192(%rdx), %ymm6
vmovdqu 224(%rdx), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu %ymm2, 192(%rsi)
vmovdqu %ymm3, 224(%rsi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vmovdqu 320(%rdx), %ymm6
vmovdqu 352(%rdx), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu %ymm2, 320(%rsi)
vmovdqu %ymm3, 352(%rsi)
vmovdqu 384(%rsi), %ymm0
vmovdqu 416(%rsi), %ymm1
vmovdqu 448(%rsi), %ymm2
vmovdqu 480(%rsi), %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu %ymm2, 448(%rsi)
vmovdqu %ymm3, 480(%rsi)
addq $0x200, %rdx
addq $0x200, %rsi
subq $0x01, %rax
jg L_kyber_keygen_avx2_to_mont_ntt_err
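# vzeroupper avoids AVX-to-SSE transition penalties in the caller; the
# two-byte `repz retq` encoding sidesteps a branch-predictor quirk on
# older AMD cores when a plain ret is a branch target.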
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_keygen_avx2,.-kyber_keygen_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl kyber_encapsulate_avx2
.type kyber_encapsulate_avx2,@function
.align 16
kyber_encapsulate_avx2:
#else
.section __TEXT,__text
.globl _kyber_encapsulate_avx2
.p2align 4
_kyber_encapsulate_avx2:
#endif /* __APPLE__ */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
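# After the four pushes above, the stack-passed arguments (the seventh,
# eighth and ninth System V integer args) sit at 40, 48 and 56(%rsp).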
movq 40(%rsp), %rax
movq 48(%rsp), %r10
movq 56(%rsp), %r11
subq $48, %rsp
vmovdqu kyber_q(%rip), %ymm14
vmovdqu kyber_v(%rip), %ymm15
movq %r11, %r13
movq %r8, %r14
L_kyber_encapsulate_avx2_trans:
# ntt
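# Same forward NTT as in keygen, applied here to each polynomial of the
# input vector at (%r14); %r13 counts the k polynomials, each 512 bytes
# (256 signed 16-bit coefficients).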
leaq L_kyber_avx2_zetas(%rip), %r15
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm12
vmovdqu 128(%r14), %ymm0
vmovdqu 160(%r14), %ymm1
vmovdqu 192(%r14), %ymm2
vmovdqu 224(%r14), %ymm3
vmovdqu 384(%r14), %ymm4
vmovdqu 416(%r14), %ymm5
vmovdqu 448(%r14), %ymm6
vmovdqu 480(%r14), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm0, 128(%r14)
vmovdqu %ymm1, 160(%r14)
vmovdqu %ymm2, 192(%r14)
vmovdqu %ymm3, 224(%r14)
vmovdqu %ymm4, 384(%r14)
vmovdqu %ymm5, 416(%r14)
vmovdqu %ymm6, 448(%r14)
vmovdqu %ymm7, 480(%r14)
vmovdqu (%r14), %ymm0
vmovdqu 32(%r14), %ymm1
vmovdqu 64(%r14), %ymm2
vmovdqu 96(%r14), %ymm3
vmovdqu 256(%r14), %ymm4
vmovdqu 288(%r14), %ymm5
vmovdqu 320(%r14), %ymm6
vmovdqu 352(%r14), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm4, 256(%r14)
vmovdqu %ymm5, 288(%r14)
vmovdqu %ymm6, 320(%r14)
vmovdqu %ymm7, 352(%r14)
vmovdqu 128(%r14), %ymm4
vmovdqu 160(%r14), %ymm5
vmovdqu 192(%r14), %ymm6
vmovdqu 224(%r14), %ymm7
# 64: 0/3
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 0/3
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 0/3
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 0/3
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm12
vmovdqu 320(%r15), %ymm11
vmovdqu 352(%r15), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 0/3
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm12
vmovdqu 448(%r15), %ymm11
vmovdqu 480(%r15), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 0/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 512(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 544(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 576(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 608(%r15), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 0/3
vmovdqu 640(%r15), %ymm10
vmovdqu 672(%r15), %ymm12
vmovdqu 704(%r15), %ymm11
vmovdqu 736(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 0/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 768(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 800(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 832(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 864(%r15), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 0/3
vmovdqu 896(%r15), %ymm10
vmovdqu 928(%r15), %ymm12
vmovdqu 960(%r15), %ymm11
vmovdqu 992(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 0/3
vmovdqu 1024(%r15), %ymm10
vmovdqu 1056(%r15), %ymm12
vmovdqu 1088(%r15), %ymm11
vmovdqu 1120(%r15), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 0/3
vmovdqu 1152(%r15), %ymm10
vmovdqu 1184(%r15), %ymm12
vmovdqu 1216(%r15), %ymm11
vmovdqu 1248(%r15), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vmovdqu %ymm0, (%r14)
vmovdqu %ymm1, 32(%r14)
vmovdqu %ymm2, 64(%r14)
vmovdqu %ymm3, 96(%r14)
vmovdqu %ymm4, 128(%r14)
vmovdqu %ymm5, 160(%r14)
vmovdqu %ymm6, 192(%r14)
vmovdqu %ymm7, 224(%r14)
vmovdqu 256(%r14), %ymm0
vmovdqu 288(%r14), %ymm1
vmovdqu 320(%r14), %ymm2
vmovdqu 352(%r14), %ymm3
vmovdqu 384(%r14), %ymm4
vmovdqu 416(%r14), %ymm5
vmovdqu 448(%r14), %ymm6
vmovdqu 480(%r14), %ymm7
# 64: 1/3
vmovdqu 1280(%r15), %ymm10
vmovdqu 1312(%r15), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 1/3
vmovdqu 1344(%r15), %ymm10
vmovdqu 1376(%r15), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 1/3
vmovdqu 1408(%r15), %ymm10
vmovdqu 1440(%r15), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 1/3
vmovdqu 1472(%r15), %ymm10
vmovdqu 1504(%r15), %ymm12
vmovdqu 1536(%r15), %ymm11
vmovdqu 1568(%r15), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 1/3
vmovdqu 1600(%r15), %ymm10
vmovdqu 1632(%r15), %ymm12
vmovdqu 1664(%r15), %ymm11
vmovdqu 1696(%r15), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 1/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1728(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 1760(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 1792(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 1824(%r15), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 1/3
vmovdqu 1856(%r15), %ymm10
vmovdqu 1888(%r15), %ymm12
vmovdqu 1920(%r15), %ymm11
vmovdqu 1952(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 1/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1984(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 2016(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 2048(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 2080(%r15), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 1/3
vmovdqu 2112(%r15), %ymm10
vmovdqu 2144(%r15), %ymm12
vmovdqu 2176(%r15), %ymm11
vmovdqu 2208(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 1/3
vmovdqu 2240(%r15), %ymm10
vmovdqu 2272(%r15), %ymm12
vmovdqu 2304(%r15), %ymm11
vmovdqu 2336(%r15), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 1/3
vmovdqu 2368(%r15), %ymm10
vmovdqu 2400(%r15), %ymm12
vmovdqu 2432(%r15), %ymm11
vmovdqu 2464(%r15), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vmovdqu %ymm0, 256(%r14)
vmovdqu %ymm1, 288(%r14)
vmovdqu %ymm2, 320(%r14)
vmovdqu %ymm3, 352(%r14)
vmovdqu %ymm4, 384(%r14)
vmovdqu %ymm5, 416(%r14)
vmovdqu %ymm6, 448(%r14)
vmovdqu %ymm7, 480(%r14)
addq $0x200, %r14
subq $0x01, %r13
jg L_kyber_encapsulate_avx2_trans
movq %r11, %r12
L_kyber_encapsulate_avx2_calc:
vmovdqu kyber_qinv(%rip), %ymm12
# Pointwise acc mont
movq %r11, %r13
# Base mul mont
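# Same base multiplication as in keygen, except this first row writes its
# products straight to (%rsi) rather than accumulating, presumably because
# the destination has not been initialized yet.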
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
addq $0x200, %rcx
addq $0x200, %r8
subq $2, %r13
jz L_pointwise_acc_mont_end_encap_bp
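        # The first polynomial of the dot product stored its results rather
        # than accumulating.  %r13 was initialised with the vector length k:
        # when k == 2 skip straight to the final accumulate-and-pack block,
        # otherwise run the middle accumulation loop k - 2 times.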
L_pointwise_acc_mont_start_encap_bp:
# Base mul mont add
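        # Middle iterations: same basemul as above, but the products are
        # added into the running sums at (%rsi) instead of overwriting them.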
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rsi), %ymm6
vmovdqu 32(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rsi), %ymm6
vmovdqu 96(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rsi), %ymm6
vmovdqu 160(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rsi), %ymm6
vmovdqu 288(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rsi), %ymm6
vmovdqu 416(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
addq $0x200, %rcx
addq $0x200, %r8
subq $0x01, %r13
jg L_pointwise_acc_mont_start_encap_bp
L_pointwise_acc_mont_end_encap_bp:
# Base mul mont add
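        # Last polynomial of the dot product: accumulate as before, then
        # re-interleave the even/odd result lanes (the vpslld/vpsrld/
        # vpblendw on %ymm0/%ymm1 after each add) so coefficients go back
        # to memory in natural order.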
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rcx), %ymm2
vmovdqu 32(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rsi), %ymm6
vmovdqu 32(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rcx), %ymm2
vmovdqu 96(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rsi), %ymm6
vmovdqu 96(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rcx), %ymm2
vmovdqu 160(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rsi), %ymm6
vmovdqu 160(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rcx), %ymm2
vmovdqu 224(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rcx), %ymm2
vmovdqu 288(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rsi), %ymm6
vmovdqu 288(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rcx), %ymm2
vmovdqu 352(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rcx), %ymm2
vmovdqu 416(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rsi), %ymm6
vmovdqu 416(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rcx), %ymm2
vmovdqu 480(%rcx), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
addq $0x200, %rcx
addq $0x200, %r8
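        # Rewind %r8: %r13 = k << 9 is k polynomials of 512 bytes (256
        # 16-bit coefficients), returning %r8 to the start of the second
        # operand vector for the next row.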
movq %r11, %r13
shl $9, %r13d
subq %r13, %r8
# invntt
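        # Inverse NTT of the 256 coefficients at (%rsi), processed as two
        # 128-coefficient halves.  The "<distance>: <half>/2" markers below
        # tag each Gentleman-Sande layer, whose butterflies compute
        #   (a, b) -> (a + b, montmul(zeta^-1, a - b))
        # with the inverse zetas (and their zeta * qinv partners) streamed
        # from L_kyber_avx2_zetas_inv.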
leaq L_kyber_avx2_zetas_inv(%rip), %r15
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 128(%rsi), %ymm4
vmovdqu 160(%rsi), %ymm5
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
# 2: 1/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu (%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 32(%r15), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 64(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 96(%r15), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 1/2
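        # From this layer on, the running sums are periodically Barrett
        # reduced so the 16-bit lanes cannot overflow.  Assuming %ymm15
        # holds the Barrett constant v ~= 2^26 / q:
        #   t = (a * v) >> 26          (vpmulhw then vpsraw $10)
        #   a = a - t * q              (vpmullw %ymm14 then vpsubw)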
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm12
vmovdqu 192(%r15), %ymm11
vmovdqu 224(%r15), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 1/2
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm12
vmovdqu 320(%r15), %ymm11
vmovdqu 352(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 1/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 384(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 416(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 448(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 480(%r15), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 1/2
vmovdqu 512(%r15), %ymm10
vmovdqu 544(%r15), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 1/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 576(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 608(%r15), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 640(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 672(%r15), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 1/2
vmovdqu 704(%r15), %ymm10
vmovdqu 736(%r15), %ymm12
vmovdqu 768(%r15), %ymm11
vmovdqu 800(%r15), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 1/2
vmovdqu 832(%r15), %ymm10
vmovdqu 864(%r15), %ymm12
vmovdqu 896(%r15), %ymm11
vmovdqu 928(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 1/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 960(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 992(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 1024(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 1056(%r15), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 1/2
vmovdqu 1088(%r15), %ymm10
vmovdqu 1120(%r15), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 1/2
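        # Distance-64 layer: butterflies now pair coefficient i with
        # i + 64, i.e. the %ymm0..3 group against %ymm4..7.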
vmovdqu 1152(%r15), %ymm10
vmovdqu 1184(%r15), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu %ymm4, 128(%rsi)
vmovdqu %ymm5, 160(%rsi)
vmovdqu %ymm6, 192(%rsi)
vmovdqu %ymm7, 224(%rsi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 384(%rsi), %ymm4
vmovdqu 416(%rsi), %ymm5
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
# 2: 2/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1216(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 1248(%r15), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 1280(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 1312(%r15), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 2/2
vmovdqu 1344(%r15), %ymm10
vmovdqu 1376(%r15), %ymm12
vmovdqu 1408(%r15), %ymm11
vmovdqu 1440(%r15), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 2/2
vmovdqu 1472(%r15), %ymm10
vmovdqu 1504(%r15), %ymm12
vmovdqu 1536(%r15), %ymm11
vmovdqu 1568(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 2/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 1600(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 1632(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 1664(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 1696(%r15), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 2/2
vmovdqu 1728(%r15), %ymm10
vmovdqu 1760(%r15), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 2/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1792(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 1824(%r15), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 1856(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 1888(%r15), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 2/2
vmovdqu 1920(%r15), %ymm10
vmovdqu 1952(%r15), %ymm12
vmovdqu 1984(%r15), %ymm11
vmovdqu 2016(%r15), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 2/2
vmovdqu 2048(%r15), %ymm10
vmovdqu 2080(%r15), %ymm12
vmovdqu 2112(%r15), %ymm11
vmovdqu 2144(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 2/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 2176(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 2208(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 2240(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 2272(%r15), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 2/2
vmovdqu 2304(%r15), %ymm10
vmovdqu 2336(%r15), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 2/2
vmovdqu 2368(%r15), %ymm10
vmovdqu 2400(%r15), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu %ymm2, 320(%rsi)
vmovdqu %ymm3, 352(%rsi)
# 128
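        # Final distance-128 layer across the two halves.  Every output is
        # additionally Montgomery-multiplied by the fixed constants at
        # 2496/2528(%r15), which appear to fold the inverse NTT's n^-1
        # scaling into this last pass.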
vmovdqu 2432(%r15), %ymm10
vmovdqu 2464(%r15), %ymm12
vmovdqu 2496(%r15), %ymm11
vmovdqu 2528(%r15), %ymm13
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vmovdqu 192(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu %ymm2, 192(%rsi)
vmovdqu %ymm3, 224(%rsi)
vmovdqu %ymm4, 384(%rsi)
vmovdqu %ymm5, 416(%rsi)
vmovdqu %ymm6, 448(%rsi)
vmovdqu %ymm7, 480(%rsi)
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 256(%rsi), %ymm4
vmovdqu 288(%rsi), %ymm5
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu %ymm4, 256(%rsi)
vmovdqu %ymm5, 288(%rsi)
vmovdqu %ymm6, 320(%rsi)
vmovdqu %ymm7, 352(%rsi)
# Add Errors
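        # Add the error polynomial at (%r9) to the result lane-wise, then
        # Barrett reduce each sum (vpmulhw %ymm15 / vpsraw $10 /
        # vpmullw %ymm14 / vpsubw) to keep coefficients in range.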
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu (%r9), %ymm4
vmovdqu 32(%r9), %ymm5
vmovdqu 64(%r9), %ymm6
vmovdqu 96(%r9), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vmovdqu 192(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vmovdqu 128(%r9), %ymm4
vmovdqu 160(%r9), %ymm5
vmovdqu 192(%r9), %ymm6
vmovdqu 224(%r9), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu %ymm2, 192(%rsi)
vmovdqu %ymm3, 224(%rsi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 256(%r9), %ymm4
vmovdqu 288(%r9), %ymm5
vmovdqu 320(%r9), %ymm6
vmovdqu 352(%r9), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu %ymm2, 320(%rsi)
vmovdqu %ymm3, 352(%rsi)
vmovdqu 384(%rsi), %ymm0
vmovdqu 416(%rsi), %ymm1
vmovdqu 448(%rsi), %ymm2
vmovdqu 480(%rsi), %ymm3
vmovdqu 384(%r9), %ymm4
vmovdqu 416(%r9), %ymm5
vmovdqu 448(%r9), %ymm6
vmovdqu 480(%r9), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu %ymm2, 448(%rsi)
vmovdqu %ymm3, 480(%rsi)
addq $0x200, %r9
addq $0x200, %rsi
subq $0x01, %r12
jg L_kyber_encapsulate_avx2_calc
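        # All rows of the matrix-vector product are done.  %ymm12 was
        # clobbered by the invntt's zeta loads above, so reload qinv before
        # the next pointwise multiply.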
vmovdqu kyber_qinv(%rip), %ymm12
# Pointwise acc mont
movq %r11, %r13
# Base mul mont
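        # Second dot product, producing the ciphertext's v polynomial (per
        # the encap_v labels): presumably the public-key vector at (%rdi)
        # against the vector at (%r8), written to (%rdx).  As before, the
        # first polynomial stores and later iterations accumulate.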
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rdx)
vmovdqu %ymm1, 96(%rdx)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rdx)
vmovdqu %ymm1, 224(%rdx)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rdx)
vmovdqu %ymm1, 352(%rdx)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rdx)
vmovdqu %ymm1, 416(%rdx)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rdx)
vmovdqu %ymm1, 480(%rdx)
addq $0x200, %rdi
addq $0x200, %r8
subq $2, %r13
jz L_pointwise_acc_mont_end_encap_v
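# The pointwise multiply-accumulate over the k polynomials of the vector is
# peeled: the first polynomial (handled above) initialises the destination
# with plain stores, the loop below accumulates the middle polynomials, and
# the tail after the loop handles the last one and restores coefficient
# order.  r13 appears to hold the remaining polynomial count (hence the
# subtract of 2 for the two peeled iterations), and each 0x200 = 512-byte
# step advances rdi and r8 by one 256-coefficient polynomial.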
L_pointwise_acc_mont_start_encap_v:
# Base mul mont add
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rdx), %ymm6
vmovdqu 32(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
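# Accumulate with a plain vpaddw into the running sums at rdx, with no
# reduction; this appears safe since each Montgomery-reduced product lies
# in (-q, q) and at most k <= 4 polynomials are summed, keeping every
# total well inside int16 range.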
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rdx), %ymm6
vmovdqu 96(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rdx)
vmovdqu %ymm1, 96(%rdx)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rdx), %ymm6
vmovdqu 160(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rdx), %ymm6
vmovdqu 224(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rdx)
vmovdqu %ymm1, 224(%rdx)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rdx), %ymm6
vmovdqu 288(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rdx), %ymm6
vmovdqu 352(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rdx)
vmovdqu %ymm1, 352(%rdx)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rdx), %ymm6
vmovdqu 416(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rdx)
vmovdqu %ymm1, 416(%rdx)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rdx)
vmovdqu %ymm1, 480(%rdx)
addq $0x200, %rdi
addq $0x200, %r8
subq $0x01, %r13
jg L_pointwise_acc_mont_start_encap_v
L_pointwise_acc_mont_end_encap_v:
# Base mul mont add
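# Final peeled iteration: identical to the loop body, except that after
# accumulating, each result pair is also swapped back from the even/odd
# split into natural coefficient order (the vpslld/vpsrld/vpblendw on the
# sums below) before being stored.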
leaq L_kyber_avx2_zetas_basemul(%rip), %r15
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%r8), %ymm4
vmovdqu 32(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r15), %ymm10
vmovdqu 32(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rdx), %ymm6
vmovdqu 32(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
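# Re-interleave: merge the even-indexed (ymm0) and odd-indexed (ymm1)
# result halves back into the natural in-memory coefficient order.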
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%r8), %ymm4
vmovdqu 96(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r15), %ymm10
vmovdqu 96(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rdx), %ymm6
vmovdqu 96(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rdx)
vmovdqu %ymm1, 96(%rdx)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%r8), %ymm4
vmovdqu 160(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rdx), %ymm6
vmovdqu 160(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%r8), %ymm4
vmovdqu 224(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r15), %ymm10
vmovdqu 224(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rdx), %ymm6
vmovdqu 224(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rdx)
vmovdqu %ymm1, 224(%rdx)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%r8), %ymm4
vmovdqu 288(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rdx), %ymm6
vmovdqu 288(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%r8), %ymm4
vmovdqu 352(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r15), %ymm10
vmovdqu 352(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rdx), %ymm6
vmovdqu 352(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rdx)
vmovdqu %ymm1, 352(%rdx)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%r8), %ymm4
vmovdqu 416(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r15), %ymm10
vmovdqu 416(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rdx), %ymm6
vmovdqu 416(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rdx)
vmovdqu %ymm1, 416(%rdx)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%r8), %ymm4
vmovdqu 480(%r8), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r15), %ymm10
vmovdqu 480(%r15), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rdx)
vmovdqu %ymm1, 480(%rdx)
addq $0x200, %rdi
addq $0x200, %r8
movq %r11, %r13
shl $9, %r13d
subq %r13, %r8
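# Rewind the second operand: r11 appears to hold the polynomial count k, so
# the shift by 9 gives k * 512 bytes, stepping r8 back to the first
# polynomial of the vector.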
# invntt
leaq L_kyber_avx2_zetas_inv(%rip), %r15
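# The inverse NTT works on the 256 coefficients as two 128-coefficient
# halves held across ymm0-ymm7; the "# N: i/2" markers appear to label the
# butterfly layer (N) and which half is in flight.  Each layer is a
# Gentleman-Sande butterfly with a Montgomery multiply by an inverse zeta,
# roughly:
#     t  = a - b
#     a' = a + b                   /* Barrett-reduced on some layers */
#     b' = montmul(t, zeta_inv)
# The zetas_inv table appears to store each zeta next to zeta * QINV (the
# vpmullw operand), folding the QINV step of the Montgomery reduction into
# the table.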
vmovdqu (%rdx), %ymm0
vmovdqu 32(%rdx), %ymm1
vmovdqu 64(%rdx), %ymm2
vmovdqu 96(%rdx), %ymm3
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vmovdqu 192(%rdx), %ymm6
vmovdqu 224(%rdx), %ymm7
# 2: 1/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu (%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 32(%r15), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 64(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 96(%r15), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 1/2
vmovdqu 128(%r15), %ymm10
vmovdqu 160(%r15), %ymm12
vmovdqu 192(%r15), %ymm11
vmovdqu 224(%r15), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
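# Barrett reduction of the butterfly sums, interleaved every couple of
# layers to keep coefficients in int16 range.  Per 16-bit lane, with
# ymm15 = v (approximately round(2^26 / q)) and ymm14 = q:
#     t = ((int32_t)a * v) >> 26;   /* vpmulhw %ymm15, then vpsraw $10 */
#     a -= t * q;                   /* vpmullw %ymm14, vpsubw          */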
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 1/2
vmovdqu 256(%r15), %ymm10
vmovdqu 288(%r15), %ymm12
vmovdqu 320(%r15), %ymm11
vmovdqu 352(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 1/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 384(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 416(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 448(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 480(%r15), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 1/2
vmovdqu 512(%r15), %ymm10
vmovdqu 544(%r15), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 1/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 576(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 608(%r15), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 640(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 672(%r15), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 1/2
vmovdqu 704(%r15), %ymm10
vmovdqu 736(%r15), %ymm12
vmovdqu 768(%r15), %ymm11
vmovdqu 800(%r15), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 1/2
vmovdqu 832(%r15), %ymm10
vmovdqu 864(%r15), %ymm12
vmovdqu 896(%r15), %ymm11
vmovdqu 928(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 1/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 960(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 992(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 1024(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 1056(%r15), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 1/2
vmovdqu 1088(%r15), %ymm10
vmovdqu 1120(%r15), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 1/2
vmovdqu 1152(%r15), %ymm10
vmovdqu 1184(%r15), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu %ymm2, 64(%rdx)
vmovdqu %ymm3, 96(%rdx)
vmovdqu %ymm4, 128(%rdx)
vmovdqu %ymm5, 160(%rdx)
vmovdqu %ymm6, 192(%rdx)
vmovdqu %ymm7, 224(%rdx)
vmovdqu 256(%rdx), %ymm0
vmovdqu 288(%rdx), %ymm1
vmovdqu 320(%rdx), %ymm2
vmovdqu 352(%rdx), %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vmovdqu 448(%rdx), %ymm6
vmovdqu 480(%rdx), %ymm7
# 2: 2/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1216(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 1248(%r15), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 1280(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 1312(%r15), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 2/2
vmovdqu 1344(%r15), %ymm10
vmovdqu 1376(%r15), %ymm12
vmovdqu 1408(%r15), %ymm11
vmovdqu 1440(%r15), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 2/2
vmovdqu 1472(%r15), %ymm10
vmovdqu 1504(%r15), %ymm12
vmovdqu 1536(%r15), %ymm11
vmovdqu 1568(%r15), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 2/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 1600(%r15), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 1632(%r15), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 1664(%r15), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 1696(%r15), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 2/2
vmovdqu 1728(%r15), %ymm10
vmovdqu 1760(%r15), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 2/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1792(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 1824(%r15), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 1856(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 1888(%r15), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 2/2
vmovdqu 1920(%r15), %ymm10
vmovdqu 1952(%r15), %ymm12
vmovdqu 1984(%r15), %ymm11
vmovdqu 2016(%r15), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 2/2
vmovdqu 2048(%r15), %ymm10
vmovdqu 2080(%r15), %ymm12
vmovdqu 2112(%r15), %ymm11
vmovdqu 2144(%r15), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 2/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 2176(%r15), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 2208(%r15), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 2240(%r15), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 2272(%r15), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 2/2
vmovdqu 2304(%r15), %ymm10
vmovdqu 2336(%r15), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 2/2
vmovdqu 2368(%r15), %ymm10
vmovdqu 2400(%r15), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu %ymm2, 320(%rdx)
vmovdqu %ymm3, 352(%rdx)
# 128
vmovdqu 2432(%r15), %ymm10
vmovdqu 2464(%r15), %ymm12
vmovdqu 2496(%r15), %ymm11
vmovdqu 2528(%r15), %ymm13
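# Last layer ("128"): butterflies pair the coefficient blocks 128 apart,
# and every output is then Montgomery-multiplied (the vpmullw %ymm13 /
# vpmulhw %ymm11 pairs below) by a fixed constant from the table, which
# appears to absorb the n^-1 normalisation of the inverse transform.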
vmovdqu 128(%rdx), %ymm0
vmovdqu 160(%rdx), %ymm1
vmovdqu 192(%rdx), %ymm2
vmovdqu 224(%rdx), %ymm3
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu %ymm2, 192(%rdx)
vmovdqu %ymm3, 224(%rdx)
vmovdqu %ymm4, 384(%rdx)
vmovdqu %ymm5, 416(%rdx)
vmovdqu %ymm6, 448(%rdx)
vmovdqu %ymm7, 480(%rdx)
vmovdqu (%rdx), %ymm0
vmovdqu 32(%rdx), %ymm1
vmovdqu 64(%rdx), %ymm2
vmovdqu 96(%rdx), %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vmovdqu 320(%rdx), %ymm6
vmovdqu 352(%rdx), %ymm7
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu %ymm2, 64(%rdx)
vmovdqu %ymm3, 96(%rdx)
vmovdqu %ymm4, 256(%rdx)
vmovdqu %ymm5, 288(%rdx)
vmovdqu %ymm6, 320(%rdx)
vmovdqu %ymm7, 352(%rdx)
# Add Errors
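# First error addition: add the polynomial at rax coefficientwise into the
# accumulator at rdx.  No reduction is done here; the second addition pass
# below Barrett-reduces the totals.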
vmovdqu (%rdx), %ymm0
vmovdqu 32(%rdx), %ymm1
vmovdqu 64(%rdx), %ymm2
vmovdqu 96(%rdx), %ymm3
vmovdqu (%rax), %ymm4
vmovdqu 32(%rax), %ymm5
vmovdqu 64(%rax), %ymm6
vmovdqu 96(%rax), %ymm7
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu %ymm2, 64(%rdx)
vmovdqu %ymm3, 96(%rdx)
vmovdqu 128(%rdx), %ymm0
vmovdqu 160(%rdx), %ymm1
vmovdqu 192(%rdx), %ymm2
vmovdqu 224(%rdx), %ymm3
vmovdqu 128(%rax), %ymm4
vmovdqu 160(%rax), %ymm5
vmovdqu 192(%rax), %ymm6
vmovdqu 224(%rax), %ymm7
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu %ymm2, 192(%rdx)
vmovdqu %ymm3, 224(%rdx)
vmovdqu 256(%rdx), %ymm0
vmovdqu 288(%rdx), %ymm1
vmovdqu 320(%rdx), %ymm2
vmovdqu 352(%rdx), %ymm3
vmovdqu 256(%rax), %ymm4
vmovdqu 288(%rax), %ymm5
vmovdqu 320(%rax), %ymm6
vmovdqu 352(%rax), %ymm7
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu %ymm2, 320(%rdx)
vmovdqu %ymm3, 352(%rdx)
vmovdqu 384(%rdx), %ymm0
vmovdqu 416(%rdx), %ymm1
vmovdqu 448(%rdx), %ymm2
vmovdqu 480(%rdx), %ymm3
vmovdqu 384(%rax), %ymm4
vmovdqu 416(%rax), %ymm5
vmovdqu 448(%rax), %ymm6
vmovdqu 480(%rax), %ymm7
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 384(%rdx)
vmovdqu %ymm1, 416(%rdx)
vmovdqu %ymm2, 448(%rdx)
vmovdqu %ymm3, 480(%rdx)
# Add Errors
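# Second addition: add the polynomial at r10 and Barrett-reduce every sum
# (the same vpmulhw/vpsraw $10/vpmullw/vpsubw pattern as in the inverse
# NTT) so each stored coefficient ends up in a small canonical range.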
vmovdqu (%rdx), %ymm0
vmovdqu 32(%rdx), %ymm1
vmovdqu 64(%rdx), %ymm2
vmovdqu 96(%rdx), %ymm3
vmovdqu (%r10), %ymm4
vmovdqu 32(%r10), %ymm5
vmovdqu 64(%r10), %ymm6
vmovdqu 96(%r10), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, (%rdx)
vmovdqu %ymm1, 32(%rdx)
vmovdqu %ymm2, 64(%rdx)
vmovdqu %ymm3, 96(%rdx)
vmovdqu 128(%rdx), %ymm0
vmovdqu 160(%rdx), %ymm1
vmovdqu 192(%rdx), %ymm2
vmovdqu 224(%rdx), %ymm3
vmovdqu 128(%r10), %ymm4
vmovdqu 160(%r10), %ymm5
vmovdqu 192(%r10), %ymm6
vmovdqu 224(%r10), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 128(%rdx)
vmovdqu %ymm1, 160(%rdx)
vmovdqu %ymm2, 192(%rdx)
vmovdqu %ymm3, 224(%rdx)
vmovdqu 256(%rdx), %ymm0
vmovdqu 288(%rdx), %ymm1
vmovdqu 320(%rdx), %ymm2
vmovdqu 352(%rdx), %ymm3
vmovdqu 256(%r10), %ymm4
vmovdqu 288(%r10), %ymm5
vmovdqu 320(%r10), %ymm6
vmovdqu 352(%r10), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 256(%rdx)
vmovdqu %ymm1, 288(%rdx)
vmovdqu %ymm2, 320(%rdx)
vmovdqu %ymm3, 352(%rdx)
vmovdqu 384(%rdx), %ymm0
vmovdqu 416(%rdx), %ymm1
vmovdqu 448(%rdx), %ymm2
vmovdqu 480(%rdx), %ymm3
vmovdqu 384(%r10), %ymm4
vmovdqu 416(%r10), %ymm5
vmovdqu 448(%r10), %ymm6
vmovdqu 480(%r10), %ymm7
vpaddw %ymm4, %ymm0, %ymm4
vpaddw %ymm5, %ymm1, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 384(%rdx)
vmovdqu %ymm1, 416(%rdx)
vmovdqu %ymm2, 448(%rdx)
vmovdqu %ymm3, 480(%rdx)
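# Epilogue: vzeroupper clears the upper YMM state to avoid AVX/SSE
# transition penalties, the stack area is released, and the callee-saved
# registers pushed in the prologue are restored before the two-byte
# repz retq return.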
vzeroupper
addq $48, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
repz retq
#ifndef __APPLE__
.size kyber_encapsulate_avx2,.-kyber_encapsulate_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.text
.globl kyber_decapsulate_avx2
.type kyber_decapsulate_avx2,@function
.align 16
kyber_decapsulate_avx2:
#else
.section __TEXT,__text
.globl _kyber_decapsulate_avx2
.p2align 4
_kyber_decapsulate_avx2:
#endif /* __APPLE__ */
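# Entry: q is kept in ymm14 and the Barrett constant v in ymm15 for the
# whole function; the rdx and r8 pointer arguments are stashed in r9 and
# rax below.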
vmovdqu kyber_q(%rip), %ymm14
vmovdqu kyber_v(%rip), %ymm15
movq %r8, %rax
movq %rdx, %r9
L_kyber_decapsulate_avx2_trans:
# ntt
leaq L_kyber_avx2_zetas(%rip), %r10
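# Forward NTT of the polynomial at r9 using Cooley-Tukey butterflies with
# zetas from the table at r10.  For a pair (a, b) at the current distance,
# roughly:
#     t  = montmul(zeta, b)   /* vpmullw zeta*QINV, vpmulhw zeta, reduce */
#     b' = a - t
#     a' = a + t
# The "# N: i/3" markers appear to label the butterfly distance and which
# portion of the data is in flight.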
vmovdqu (%r10), %ymm10
vmovdqu 32(%r10), %ymm12
vmovdqu 128(%r9), %ymm0
vmovdqu 160(%r9), %ymm1
vmovdqu 192(%r9), %ymm2
vmovdqu 224(%r9), %ymm3
vmovdqu 384(%r9), %ymm4
vmovdqu 416(%r9), %ymm5
vmovdqu 448(%r9), %ymm6
vmovdqu 480(%r9), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm0, 128(%r9)
vmovdqu %ymm1, 160(%r9)
vmovdqu %ymm2, 192(%r9)
vmovdqu %ymm3, 224(%r9)
vmovdqu %ymm4, 384(%r9)
vmovdqu %ymm5, 416(%r9)
vmovdqu %ymm6, 448(%r9)
vmovdqu %ymm7, 480(%r9)
vmovdqu (%r9), %ymm0
vmovdqu 32(%r9), %ymm1
vmovdqu 64(%r9), %ymm2
vmovdqu 96(%r9), %ymm3
vmovdqu 256(%r9), %ymm4
vmovdqu 288(%r9), %ymm5
vmovdqu 320(%r9), %ymm6
vmovdqu 352(%r9), %ymm7
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vmovdqu %ymm4, 256(%r9)
vmovdqu %ymm5, 288(%r9)
vmovdqu %ymm6, 320(%r9)
vmovdqu %ymm7, 352(%r9)
vmovdqu 128(%r9), %ymm4
vmovdqu 160(%r9), %ymm5
vmovdqu 192(%r9), %ymm6
vmovdqu 224(%r9), %ymm7
# 64: 0/3
vmovdqu 64(%r10), %ymm10
vmovdqu 96(%r10), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 0/3
vmovdqu 128(%r10), %ymm10
vmovdqu 160(%r10), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 0/3
vmovdqu 192(%r10), %ymm10
vmovdqu 224(%r10), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 0/3
vmovdqu 256(%r10), %ymm10
vmovdqu 288(%r10), %ymm12
vmovdqu 320(%r10), %ymm11
vmovdqu 352(%r10), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 0/3
vmovdqu 384(%r10), %ymm10
vmovdqu 416(%r10), %ymm12
vmovdqu 448(%r10), %ymm11
vmovdqu 480(%r10), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 0/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 512(%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 544(%r10), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 576(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 608(%r10), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 0/3
vmovdqu 640(%r10), %ymm10
vmovdqu 672(%r10), %ymm12
vmovdqu 704(%r10), %ymm11
vmovdqu 736(%r10), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 0/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 768(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 800(%r10), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 832(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 864(%r10), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 0/3
vmovdqu 896(%r10), %ymm10
vmovdqu 928(%r10), %ymm12
vmovdqu 960(%r10), %ymm11
vmovdqu 992(%r10), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 0/3
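# vpsllq/vpsrlq plus vpblendd swap the 32-bit halves of each qword,
# pairing coefficients for the distance-2 butterflies.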
vmovdqu 1024(%r10), %ymm10
vmovdqu 1056(%r10), %ymm12
vmovdqu 1088(%r10), %ymm11
vmovdqu 1120(%r10), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 0/3
vmovdqu 1152(%r10), %ymm10
vmovdqu 1184(%r10), %ymm12
vmovdqu 1216(%r10), %ymm11
vmovdqu 1248(%r10), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
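# The lane shuffles are undone above; each register is now Barrett-reduced
# before the store. A sketch, assuming ymm15 holds the Barrett constant
# v = floor(2^26 / q) = 20159 (loaded earlier):
#   t = mulhi(a, v) >> 10    # ~ a / q
#   a = a - t * q            # representative in (-q, q)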
vpmulhw %ymm15, %ymm0, %ymm8
vpmulhw %ymm15, %ymm1, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm8
vpsubw %ymm9, %ymm1, %ymm9
vmovdqu %ymm8, (%r9)
vmovdqu %ymm9, 32(%r9)
vpmulhw %ymm15, %ymm2, %ymm8
vpmulhw %ymm15, %ymm3, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vmovdqu %ymm8, 64(%r9)
vmovdqu %ymm9, 96(%r9)
vpmulhw %ymm15, %ymm4, %ymm8
vpmulhw %ymm15, %ymm5, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vmovdqu %ymm8, 128(%r9)
vmovdqu %ymm9, 160(%r9)
vpmulhw %ymm15, %ymm6, %ymm8
vpmulhw %ymm15, %ymm7, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vmovdqu %ymm8, 192(%r9)
vmovdqu %ymm9, 224(%r9)
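# Second 128-coefficient half: reload ymm0-ymm7 from 256(%r9) onward and
# run the same stage pipeline with the next block of zetas.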
vmovdqu 256(%r9), %ymm0
vmovdqu 288(%r9), %ymm1
vmovdqu 320(%r9), %ymm2
vmovdqu 352(%r9), %ymm3
vmovdqu 384(%r9), %ymm4
vmovdqu 416(%r9), %ymm5
vmovdqu 448(%r9), %ymm6
vmovdqu 480(%r9), %ymm7
# 64: 1/3
vmovdqu 1280(%r10), %ymm10
vmovdqu 1312(%r10), %ymm12
vpmullw %ymm12, %ymm4, %ymm8
vpmullw %ymm12, %ymm5, %ymm9
vpmulhw %ymm10, %ymm4, %ymm4
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vpsubw %ymm8, %ymm0, %ymm4
vpsubw %ymm9, %ymm1, %ymm5
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm2, %ymm6
vpsubw %ymm9, %ymm3, %ymm7
vpaddw %ymm8, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
# 32: 1/3
vmovdqu 1344(%r10), %ymm10
vmovdqu 1376(%r10), %ymm12
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm2
vpsubw %ymm9, %ymm1, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
# 32: 1/3
vmovdqu 1408(%r10), %ymm10
vmovdqu 1440(%r10), %ymm12
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm6
vpsubw %ymm9, %ymm5, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
# 16: 1/3
vmovdqu 1472(%r10), %ymm10
vmovdqu 1504(%r10), %ymm12
vmovdqu 1536(%r10), %ymm11
vmovdqu 1568(%r10), %ymm13
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 16: 1/3
vmovdqu 1600(%r10), %ymm10
vmovdqu 1632(%r10), %ymm12
vmovdqu 1664(%r10), %ymm11
vmovdqu 1696(%r10), %ymm13
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 8: 1/3
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1728(%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm1
vmovdqu 1760(%r10), %ymm12
vperm2i128 $32, %ymm3, %ymm2, %ymm9
vmovdqu 1792(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm3
vmovdqu 1824(%r10), %ymm13
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm0
vpsubw %ymm2, %ymm3, %ymm2
vpsubw %ymm0, %ymm8, %ymm1
vpsubw %ymm2, %ymm9, %ymm3
vpaddw %ymm0, %ymm8, %ymm8
vpaddw %ymm2, %ymm9, %ymm9
# 4: 1/3
vmovdqu 1856(%r10), %ymm10
vmovdqu 1888(%r10), %ymm12
vmovdqu 1920(%r10), %ymm11
vmovdqu 1952(%r10), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 8: 1/3
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1984(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm5
vmovdqu 2016(%r10), %ymm12
vperm2i128 $32, %ymm7, %ymm6, %ymm9
vmovdqu 2048(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm7
vmovdqu 2080(%r10), %ymm13
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm4
vpsubw %ymm6, %ymm7, %ymm6
vpsubw %ymm4, %ymm8, %ymm5
vpsubw %ymm6, %ymm9, %ymm7
vpaddw %ymm4, %ymm8, %ymm8
vpaddw %ymm6, %ymm9, %ymm9
# 4: 1/3
vmovdqu 2112(%r10), %ymm10
vmovdqu 2144(%r10), %ymm12
vmovdqu 2176(%r10), %ymm11
vmovdqu 2208(%r10), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
# 2: 1/3
vmovdqu 2240(%r10), %ymm10
vmovdqu 2272(%r10), %ymm12
vmovdqu 2304(%r10), %ymm11
vmovdqu 2336(%r10), %ymm13
vpsllq $32, %ymm1, %ymm8
vpsrlq $32, %ymm0, %ymm9
vpblendd $0xaa, %ymm8, %ymm0, %ymm0
vpblendd $0x55, %ymm9, %ymm1, %ymm1
vpsllq $32, %ymm3, %ymm8
vpsrlq $32, %ymm2, %ymm9
vpblendd $0xaa, %ymm8, %ymm2, %ymm2
vpblendd $0x55, %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm1, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vpsubw %ymm8, %ymm0, %ymm1
vpsubw %ymm9, %ymm2, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm2, %ymm2
# 2: 1/3
vmovdqu 2368(%r10), %ymm10
vmovdqu 2400(%r10), %ymm12
vmovdqu 2432(%r10), %ymm11
vmovdqu 2464(%r10), %ymm13
vpsllq $32, %ymm5, %ymm8
vpsrlq $32, %ymm4, %ymm9
vpblendd $0xaa, %ymm8, %ymm4, %ymm4
vpblendd $0x55, %ymm9, %ymm5, %ymm5
vpsllq $32, %ymm7, %ymm8
vpsrlq $32, %ymm6, %ymm9
vpblendd $0xaa, %ymm8, %ymm6, %ymm6
vpblendd $0x55, %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm5, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm5, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vpsubw %ymm8, %ymm4, %ymm5
vpsubw %ymm9, %ymm6, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm6, %ymm6
vpunpckldq %ymm1, %ymm0, %ymm8
vpunpckhdq %ymm1, %ymm0, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm0
vperm2i128 $49, %ymm9, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm2, %ymm8
vpunpckhdq %ymm3, %ymm2, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm2
vperm2i128 $49, %ymm9, %ymm8, %ymm3
vpunpckldq %ymm5, %ymm4, %ymm8
vpunpckhdq %ymm5, %ymm4, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm4
vperm2i128 $49, %ymm9, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm6, %ymm8
vpunpckhdq %ymm7, %ymm6, %ymm9
vperm2i128 $32, %ymm9, %ymm8, %ymm6
vperm2i128 $49, %ymm9, %ymm8, %ymm7
vpmulhw %ymm15, %ymm0, %ymm8
vpmulhw %ymm15, %ymm1, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm8
vpsubw %ymm9, %ymm1, %ymm9
vmovdqu %ymm8, 256(%r9)
vmovdqu %ymm9, 288(%r9)
vpmulhw %ymm15, %ymm2, %ymm8
vpmulhw %ymm15, %ymm3, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm8
vpsubw %ymm9, %ymm3, %ymm9
vmovdqu %ymm8, 320(%r9)
vmovdqu %ymm9, 352(%r9)
vpmulhw %ymm15, %ymm4, %ymm8
vpmulhw %ymm15, %ymm5, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm8
vpsubw %ymm9, %ymm5, %ymm9
vmovdqu %ymm8, 384(%r9)
vmovdqu %ymm9, 416(%r9)
vpmulhw %ymm15, %ymm6, %ymm8
vpmulhw %ymm15, %ymm7, %ymm9
vpsraw $10, %ymm8, %ymm8
vpsraw $10, %ymm9, %ymm9
vpmullw %ymm14, %ymm8, %ymm8
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm8
vpsubw %ymm9, %ymm7, %ymm9
vmovdqu %ymm8, 448(%r9)
vmovdqu %ymm9, 480(%r9)
addq $0x200, %r9
subq $0x01, %rax
jg L_kyber_decapsulate_avx2_trans
vmovdqu kyber_qinv(%rip), %ymm12
# Pointwise acc mont: multiply-accumulate %r8 pairs of NTT-domain polynomials into (%rsi)
movq %r8, %rax
# Base mul mont
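# Products are taken in Z_q[X]/(X^2 - zeta_i), one zeta per coefficient
# pair:
#   r0 = a0*b0 + zeta*a1*b1
#   r1 = a0*b1 + a1*b0
# with each 32-bit product Montgomery-reduced back to 16 bits (so the
# results carry an extra 2^-16 factor).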
leaq L_kyber_avx2_zetas_basemul(%rip), %r10
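# The vpslld/vpsrld/vpblendw triples below deinterleave each input pair:
# even-index coefficients collect in ymm2/ymm4, odd-index in ymm3/ymm5.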
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdx), %ymm4
vmovdqu 32(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r10), %ymm10
vmovdqu 32(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
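# 16-bit Montgomery reduction pattern (ymm12 = qinv mod 2^16 from
# kyber_qinv above; ymm14 assumed to hold q = 3329):
#   m = mullo(lo, qinv);  r = hi - mulhi(q, m)   =>  r = a * 2^-16 mod q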
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
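# ymm0 = a0*b0 + zeta*a1*b1 (even-index result, Montgomery domain)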
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
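# ymm1 = a0*b1 + a1*b0 (odd-index result, Montgomery domain)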
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdx), %ymm4
vmovdqu 96(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r10), %ymm10
vmovdqu 96(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r10), %ymm10
vmovdqu 160(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdx), %ymm4
vmovdqu 224(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r10), %ymm10
vmovdqu 224(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r10), %ymm10
vmovdqu 288(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdx), %ymm4
vmovdqu 352(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r10), %ymm10
vmovdqu 352(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r10), %ymm10
vmovdqu 416(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdx), %ymm4
vmovdqu 480(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r10), %ymm10
vmovdqu 480(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
addq $0x200, %rdi
addq $0x200, %rdx
subq $2, %rax
jz L_pointwise_acc_mont_end_decap
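# The first polynomial pair was stored without accumulation; the loop
# below adds pairs 1..k-2 into (%rsi), and the block at
# L_pointwise_acc_mont_end_decap adds the final pair.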
L_pointwise_acc_mont_start_decap:
# Base mul mont add
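# Same base multiplication as above, but the results are added into the
# accumulator at (%rsi) instead of overwriting it.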
leaq L_kyber_avx2_zetas_basemul(%rip), %r10
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdx), %ymm4
vmovdqu 32(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r10), %ymm10
vmovdqu 32(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rsi), %ymm6
vmovdqu 32(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdx), %ymm4
vmovdqu 96(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r10), %ymm10
vmovdqu 96(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rsi), %ymm6
vmovdqu 96(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r10), %ymm10
vmovdqu 160(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rsi), %ymm6
vmovdqu 160(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdx), %ymm4
vmovdqu 224(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r10), %ymm10
vmovdqu 224(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r10), %ymm10
vmovdqu 288(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rsi), %ymm6
vmovdqu 288(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdx), %ymm4
vmovdqu 352(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r10), %ymm10
vmovdqu 352(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r10), %ymm10
vmovdqu 416(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rsi), %ymm6
vmovdqu 416(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdx), %ymm4
vmovdqu 480(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r10), %ymm10
vmovdqu 480(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
addq $0x200, %rdi
addq $0x200, %rdx
subq $0x01, %rax
jg L_pointwise_acc_mont_start_decap
L_pointwise_acc_mont_end_decap:
# Base mul mont add
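# Final polynomial pair: accumulate as before, then re-interleave the
# even/odd coefficient split back to natural order before storing.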
leaq L_kyber_avx2_zetas_basemul(%rip), %r10
vmovdqu (%rdi), %ymm2
vmovdqu 32(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu (%rdx), %ymm4
vmovdqu 32(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu (%r10), %ymm10
vmovdqu 32(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu (%rsi), %ymm6
vmovdqu 32(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 64(%rdx), %ymm4
vmovdqu 96(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 64(%r10), %ymm10
vmovdqu 96(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 64(%rsi), %ymm6
vmovdqu 96(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm1, 96(%rsi)
vmovdqu 128(%rdi), %ymm2
vmovdqu 160(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 128(%rdx), %ymm4
vmovdqu 160(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 128(%r10), %ymm10
vmovdqu 160(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 128(%rsi), %ymm6
vmovdqu 160(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu 192(%rdi), %ymm2
vmovdqu 224(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 192(%rdx), %ymm4
vmovdqu 224(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 192(%r10), %ymm10
vmovdqu 224(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 192(%rsi)
vmovdqu %ymm1, 224(%rsi)
vmovdqu 256(%rdi), %ymm2
vmovdqu 288(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 256(%rdx), %ymm4
vmovdqu 288(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 256(%r10), %ymm10
vmovdqu 288(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 256(%rsi), %ymm6
vmovdqu 288(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 320(%rdx), %ymm4
vmovdqu 352(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 320(%r10), %ymm10
vmovdqu 352(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 320(%rsi)
vmovdqu %ymm1, 352(%rsi)
vmovdqu 384(%rdi), %ymm2
vmovdqu 416(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 384(%rdx), %ymm4
vmovdqu 416(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 384(%r10), %ymm10
vmovdqu 416(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 384(%rsi), %ymm6
vmovdqu 416(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vpslld $16, %ymm3, %ymm6
vpsrld $16, %ymm2, %ymm7
vpblendw $0xaa, %ymm6, %ymm2, %ymm2
vpblendw $0x55, %ymm7, %ymm3, %ymm3
vmovdqu 448(%rdx), %ymm4
vmovdqu 480(%rdx), %ymm5
vpslld $16, %ymm5, %ymm6
vpsrld $16, %ymm4, %ymm7
vpblendw $0xaa, %ymm6, %ymm4, %ymm4
vpblendw $0x55, %ymm7, %ymm5, %ymm5
vmovdqu 448(%r10), %ymm10
vmovdqu 480(%r10), %ymm11
vpmullw %ymm5, %ymm3, %ymm0
vpmulhw %ymm5, %ymm3, %ymm6
vpmullw %ymm4, %ymm2, %ymm1
vpmulhw %ymm4, %ymm2, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm0, %ymm8
vpmullw %ymm12, %ymm1, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm0
vpsubw %ymm9, %ymm7, %ymm1
vpmullw %ymm11, %ymm0, %ymm6
vpmulhw %ymm10, %ymm0, %ymm7
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm6, %ymm7, %ymm0
vpaddw %ymm1, %ymm0, %ymm0
vpmullw %ymm5, %ymm2, %ymm1
vpmulhw %ymm5, %ymm2, %ymm6
vpmullw %ymm4, %ymm3, %ymm2
vpmulhw %ymm4, %ymm3, %ymm7
# Mont Reduce
vpmullw %ymm12, %ymm1, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm1
vpsubw %ymm9, %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm1
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpaddw %ymm6, %ymm0, %ymm0
vpaddw %ymm7, %ymm1, %ymm1
vpslld $16, %ymm1, %ymm6
vpsrld $16, %ymm0, %ymm7
vpblendw $0xaa, %ymm6, %ymm0, %ymm0
vpblendw $0x55, %ymm7, %ymm1, %ymm1
vmovdqu %ymm0, 448(%rsi)
vmovdqu %ymm1, 480(%rsi)
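# Step both input pointers past the last polynomial, then rewind %rdx by
# %r8 * 512 bytes (512 bytes per polynomial of 256 16-bit coefficients)
# back to the start of its vector.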
addq $0x200, %rdi
addq $0x200, %rdx
movq %r8, %rax
shl $9, %eax
subq %rax, %rdx
# invntt
leaq L_kyber_avx2_zetas_inv(%rip), %r10
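# Inverse NTT with Gentleman-Sande butterflies. A sketch, assuming the
# inverse zeta table also stores (zeta, zeta*qinv mod 2^16) pairs:
#   a' = a + b
#   b' = montmul(zeta, a - b)
# Barrett reductions (mulhi with ymm15, then vpsraw $10) are interleaved
# at intervals to keep coefficients bounded.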
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 128(%rsi), %ymm4
vmovdqu 160(%rsi), %ymm5
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
# 2: 1/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu (%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 32(%r10), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 64(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 96(%r10), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 1/2
vmovdqu 128(%r10), %ymm10
vmovdqu 160(%r10), %ymm12
vmovdqu 192(%r10), %ymm11
vmovdqu 224(%r10), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 1/2
vmovdqu 256(%r10), %ymm10
vmovdqu 288(%r10), %ymm12
vmovdqu 320(%r10), %ymm11
vmovdqu 352(%r10), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 1/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 384(%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 416(%r10), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 448(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 480(%r10), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 1/2
vmovdqu 512(%r10), %ymm10
vmovdqu 544(%r10), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 1/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 576(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 608(%r10), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 640(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 672(%r10), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 1/2
vmovdqu 704(%r10), %ymm10
vmovdqu 736(%r10), %ymm12
vmovdqu 768(%r10), %ymm11
vmovdqu 800(%r10), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 1/2
vmovdqu 832(%r10), %ymm10
vmovdqu 864(%r10), %ymm12
vmovdqu 896(%r10), %ymm11
vmovdqu 928(%r10), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 1/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 960(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 992(%r10), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 1024(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 1056(%r10), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 1/2
vmovdqu 1088(%r10), %ymm10
vmovdqu 1120(%r10), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 1/2
vmovdqu 1152(%r10), %ymm10
vmovdqu 1184(%r10), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu %ymm4, 128(%rsi)
vmovdqu %ymm5, 160(%rsi)
vmovdqu %ymm6, 192(%rsi)
vmovdqu %ymm7, 224(%rsi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 384(%rsi), %ymm4
vmovdqu 416(%rsi), %ymm5
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
# 2: 2/2
vperm2i128 $32, %ymm1, %ymm0, %ymm8
vmovdqu 1216(%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm0, %ymm9
vmovdqu 1248(%r10), %ymm12
vpsllq $32, %ymm9, %ymm0
vpsrlq $32, %ymm8, %ymm1
vpblendd $0xaa, %ymm0, %ymm8, %ymm0
vpblendd $0x55, %ymm1, %ymm9, %ymm1
vperm2i128 $32, %ymm3, %ymm2, %ymm8
vmovdqu 1280(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm2, %ymm9
vmovdqu 1312(%r10), %ymm13
vpsllq $32, %ymm9, %ymm2
vpsrlq $32, %ymm8, %ymm3
vpblendd $0xaa, %ymm2, %ymm8, %ymm2
vpblendd $0x55, %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 4: 2/2
vmovdqu 1344(%r10), %ymm10
vmovdqu 1376(%r10), %ymm12
vmovdqu 1408(%r10), %ymm11
vmovdqu 1440(%r10), %ymm13
vpunpckldq %ymm1, %ymm8, %ymm0
vpunpckhdq %ymm1, %ymm8, %ymm1
vpunpckldq %ymm3, %ymm9, %ymm2
vpunpckhdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm2
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm2, %ymm2
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm8, %ymm8
vpsubw %ymm2, %ymm9, %ymm9
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 8: 2/2
vmovdqu 1472(%r10), %ymm10
vmovdqu 1504(%r10), %ymm12
vmovdqu 1536(%r10), %ymm11
vmovdqu 1568(%r10), %ymm13
vpunpcklqdq %ymm1, %ymm8, %ymm0
vpunpckhqdq %ymm1, %ymm8, %ymm1
vpunpcklqdq %ymm3, %ymm9, %ymm2
vpunpckhqdq %ymm3, %ymm9, %ymm3
vpaddw %ymm1, %ymm0, %ymm8
vpaddw %ymm3, %ymm2, %ymm9
vpsubw %ymm1, %ymm0, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm12, %ymm1, %ymm0
vpmullw %ymm13, %ymm3, %ymm2
vpmulhw %ymm10, %ymm1, %ymm1
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm0, %ymm0
vpmulhw %ymm14, %ymm2, %ymm2
vpsubw %ymm0, %ymm1, %ymm1
vpsubw %ymm2, %ymm3, %ymm3
# 16: 2/2
vperm2i128 $32, %ymm1, %ymm8, %ymm0
vmovdqu 1600(%r10), %ymm10
vperm2i128 $49, %ymm1, %ymm8, %ymm1
vmovdqu 1632(%r10), %ymm12
vperm2i128 $32, %ymm3, %ymm9, %ymm2
vmovdqu 1664(%r10), %ymm11
vperm2i128 $49, %ymm3, %ymm9, %ymm3
vmovdqu 1696(%r10), %ymm13
vpsubw %ymm1, %ymm0, %ymm8
vpsubw %ymm3, %ymm2, %ymm9
vpaddw %ymm1, %ymm0, %ymm0
vpaddw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm8, %ymm1
vpmullw %ymm13, %ymm9, %ymm3
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm1, %ymm1
vpmulhw %ymm14, %ymm3, %ymm3
vpsubw %ymm1, %ymm8, %ymm1
vpsubw %ymm3, %ymm9, %ymm3
# 32: 2/2
vmovdqu 1728(%r10), %ymm10
vmovdqu 1760(%r10), %ymm12
vpaddw %ymm2, %ymm0, %ymm8
vpaddw %ymm3, %ymm1, %ymm9
vpsubw %ymm2, %ymm0, %ymm2
vpsubw %ymm3, %ymm1, %ymm3
vpmulhw %ymm15, %ymm8, %ymm0
vpmulhw %ymm15, %ymm9, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm8, %ymm0
vpsubw %ymm1, %ymm9, %ymm1
vpmullw %ymm12, %ymm2, %ymm8
vpmullw %ymm12, %ymm3, %ymm9
vpmulhw %ymm10, %ymm2, %ymm2
vpmulhw %ymm10, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
# 2: 2/2
vperm2i128 $32, %ymm5, %ymm4, %ymm8
vmovdqu 1792(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm4, %ymm9
vmovdqu 1824(%r10), %ymm12
vpsllq $32, %ymm9, %ymm4
vpsrlq $32, %ymm8, %ymm5
vpblendd $0xaa, %ymm4, %ymm8, %ymm4
vpblendd $0x55, %ymm5, %ymm9, %ymm5
vperm2i128 $32, %ymm7, %ymm6, %ymm8
vmovdqu 1856(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm6, %ymm9
vmovdqu 1888(%r10), %ymm13
vpsllq $32, %ymm9, %ymm6
vpsrlq $32, %ymm8, %ymm7
vpblendd $0xaa, %ymm6, %ymm8, %ymm6
vpblendd $0x55, %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 4: 2/2
vmovdqu 1920(%r10), %ymm10
vmovdqu 1952(%r10), %ymm12
vmovdqu 1984(%r10), %ymm11
vmovdqu 2016(%r10), %ymm13
vpunpckldq %ymm5, %ymm8, %ymm4
vpunpckhdq %ymm5, %ymm8, %ymm5
vpunpckldq %ymm7, %ymm9, %ymm6
vpunpckhdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm6
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm6, %ymm6
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 8: 2/2
vmovdqu 2048(%r10), %ymm10
vmovdqu 2080(%r10), %ymm12
vmovdqu 2112(%r10), %ymm11
vmovdqu 2144(%r10), %ymm13
vpunpcklqdq %ymm5, %ymm8, %ymm4
vpunpckhqdq %ymm5, %ymm8, %ymm5
vpunpcklqdq %ymm7, %ymm9, %ymm6
vpunpckhqdq %ymm7, %ymm9, %ymm7
vpaddw %ymm5, %ymm4, %ymm8
vpaddw %ymm7, %ymm6, %ymm9
vpsubw %ymm5, %ymm4, %ymm5
vpsubw %ymm7, %ymm6, %ymm7
vpmullw %ymm12, %ymm5, %ymm4
vpmullw %ymm13, %ymm7, %ymm6
vpmulhw %ymm10, %ymm5, %ymm5
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm6, %ymm6
vpsubw %ymm4, %ymm5, %ymm5
vpsubw %ymm6, %ymm7, %ymm7
# 16: 2/2
vperm2i128 $32, %ymm5, %ymm8, %ymm4
vmovdqu 2176(%r10), %ymm10
vperm2i128 $49, %ymm5, %ymm8, %ymm5
vmovdqu 2208(%r10), %ymm12
vperm2i128 $32, %ymm7, %ymm9, %ymm6
vmovdqu 2240(%r10), %ymm11
vperm2i128 $49, %ymm7, %ymm9, %ymm7
vmovdqu 2272(%r10), %ymm13
vpsubw %ymm5, %ymm4, %ymm8
vpsubw %ymm7, %ymm6, %ymm9
vpaddw %ymm5, %ymm4, %ymm4
vpaddw %ymm7, %ymm6, %ymm6
vpmullw %ymm12, %ymm8, %ymm5
vpmullw %ymm13, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm11, %ymm9, %ymm9
vpmulhw %ymm14, %ymm5, %ymm5
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm5, %ymm8, %ymm5
vpsubw %ymm7, %ymm9, %ymm7
# 32: 2/2
vmovdqu 2304(%r10), %ymm10
vmovdqu 2336(%r10), %ymm12
vpaddw %ymm6, %ymm4, %ymm8
vpaddw %ymm7, %ymm5, %ymm9
vpsubw %ymm6, %ymm4, %ymm6
vpsubw %ymm7, %ymm5, %ymm7
vpmulhw %ymm15, %ymm8, %ymm4
vpmulhw %ymm15, %ymm9, %ymm5
vpsraw $10, %ymm4, %ymm4
vpsraw $10, %ymm5, %ymm5
vpmullw %ymm14, %ymm4, %ymm4
vpmullw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpmullw %ymm12, %ymm6, %ymm8
vpmullw %ymm12, %ymm7, %ymm9
vpmulhw %ymm10, %ymm6, %ymm6
vpmulhw %ymm10, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
# 64: 2/2
vmovdqu 2368(%r10), %ymm10
vmovdqu 2400(%r10), %ymm12
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu %ymm2, 320(%rsi)
vmovdqu %ymm3, 352(%rsi)
# 128: final inverse NTT layer (distance 128). Afterwards every output is
# multiplied by the fixed constant pair ymm11/ymm13 (likely the combined
# n^-1 / Montgomery scaling factor).
vmovdqu 2432(%r10), %ymm10
vmovdqu 2464(%r10), %ymm12
vmovdqu 2496(%r10), %ymm11
vmovdqu 2528(%r10), %ymm13
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vmovdqu 192(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu %ymm2, 192(%rsi)
vmovdqu %ymm3, 224(%rsi)
vmovdqu %ymm4, 384(%rsi)
vmovdqu %ymm5, 416(%rsi)
vmovdqu %ymm6, 448(%rsi)
vmovdqu %ymm7, 480(%rsi)
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 256(%rsi), %ymm4
vmovdqu 288(%rsi), %ymm5
vmovdqu 320(%rsi), %ymm6
vmovdqu 352(%rsi), %ymm7
vpsubw %ymm4, %ymm0, %ymm8
vpsubw %ymm5, %ymm1, %ymm9
vpaddw %ymm4, %ymm0, %ymm0
vpaddw %ymm5, %ymm1, %ymm1
vpmullw %ymm12, %ymm8, %ymm4
vpmullw %ymm12, %ymm9, %ymm5
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm4, %ymm4
vpmulhw %ymm14, %ymm5, %ymm5
vpsubw %ymm4, %ymm8, %ymm4
vpsubw %ymm5, %ymm9, %ymm5
vpsubw %ymm6, %ymm2, %ymm8
vpsubw %ymm7, %ymm3, %ymm9
vpaddw %ymm6, %ymm2, %ymm2
vpaddw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm8, %ymm6
vpmullw %ymm12, %ymm9, %ymm7
vpmulhw %ymm10, %ymm8, %ymm8
vpmulhw %ymm10, %ymm9, %ymm9
vpmulhw %ymm14, %ymm6, %ymm6
vpmulhw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm8, %ymm6
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm13, %ymm0, %ymm8
vpmullw %ymm13, %ymm1, %ymm9
vpmulhw %ymm11, %ymm0, %ymm0
vpmulhw %ymm11, %ymm1, %ymm1
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm0, %ymm0
vpsubw %ymm9, %ymm1, %ymm1
vpmullw %ymm13, %ymm2, %ymm8
vpmullw %ymm13, %ymm3, %ymm9
vpmulhw %ymm11, %ymm2, %ymm2
vpmulhw %ymm11, %ymm3, %ymm3
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm13, %ymm4, %ymm8
vpmullw %ymm13, %ymm5, %ymm9
vpmulhw %ymm11, %ymm4, %ymm4
vpmulhw %ymm11, %ymm5, %ymm5
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm9, %ymm5, %ymm5
vpmullw %ymm13, %ymm6, %ymm8
vpmullw %ymm13, %ymm7, %ymm9
vpmulhw %ymm11, %ymm6, %ymm6
vpmulhw %ymm11, %ymm7, %ymm7
vpmulhw %ymm14, %ymm8, %ymm8
vpmulhw %ymm14, %ymm9, %ymm9
vpsubw %ymm8, %ymm6, %ymm6
vpsubw %ymm9, %ymm7, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu %ymm4, 256(%rsi)
vmovdqu %ymm5, 288(%rsi)
vmovdqu %ymm6, 320(%rsi)
vmovdqu %ymm7, 352(%rsi)
# Sub Errors: subtract the polynomial at (%rsi) from the one at (%rcx) and
# Barrett-reduce each coefficient (vpmulhw by the constant in ymm15,
# vpsraw $10, multiply back by q in ymm14); result written over (%rsi).
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu (%rcx), %ymm4
vmovdqu 32(%rcx), %ymm5
vmovdqu 64(%rcx), %ymm6
vmovdqu 96(%rcx), %ymm7
vpsubw %ymm0, %ymm4, %ymm4
vpsubw %ymm1, %ymm5, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpsubw %ymm2, %ymm6, %ymm6
vpsubw %ymm3, %ymm7, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm1, 32(%rsi)
vmovdqu %ymm2, 64(%rsi)
vmovdqu %ymm3, 96(%rsi)
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vmovdqu 192(%rsi), %ymm2
vmovdqu 224(%rsi), %ymm3
vmovdqu 128(%rcx), %ymm4
vmovdqu 160(%rcx), %ymm5
vmovdqu 192(%rcx), %ymm6
vmovdqu 224(%rcx), %ymm7
vpsubw %ymm0, %ymm4, %ymm4
vpsubw %ymm1, %ymm5, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpsubw %ymm2, %ymm6, %ymm6
vpsubw %ymm3, %ymm7, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 128(%rsi)
vmovdqu %ymm1, 160(%rsi)
vmovdqu %ymm2, 192(%rsi)
vmovdqu %ymm3, 224(%rsi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 256(%rcx), %ymm4
vmovdqu 288(%rcx), %ymm5
vmovdqu 320(%rcx), %ymm6
vmovdqu 352(%rcx), %ymm7
vpsubw %ymm0, %ymm4, %ymm4
vpsubw %ymm1, %ymm5, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpsubw %ymm2, %ymm6, %ymm6
vpsubw %ymm3, %ymm7, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 256(%rsi)
vmovdqu %ymm1, 288(%rsi)
vmovdqu %ymm2, 320(%rsi)
vmovdqu %ymm3, 352(%rsi)
vmovdqu 384(%rsi), %ymm0
vmovdqu 416(%rsi), %ymm1
vmovdqu 448(%rsi), %ymm2
vmovdqu 480(%rsi), %ymm3
vmovdqu 384(%rcx), %ymm4
vmovdqu 416(%rcx), %ymm5
vmovdqu 448(%rcx), %ymm6
vmovdqu 480(%rcx), %ymm7
vpsubw %ymm0, %ymm4, %ymm4
vpsubw %ymm1, %ymm5, %ymm5
vpmulhw %ymm15, %ymm4, %ymm0
vpmulhw %ymm15, %ymm5, %ymm1
vpsraw $10, %ymm0, %ymm0
vpsraw $10, %ymm1, %ymm1
vpmullw %ymm14, %ymm0, %ymm0
vpmullw %ymm14, %ymm1, %ymm1
vpsubw %ymm0, %ymm4, %ymm0
vpsubw %ymm1, %ymm5, %ymm1
vpsubw %ymm2, %ymm6, %ymm6
vpsubw %ymm3, %ymm7, %ymm7
vpmulhw %ymm15, %ymm6, %ymm2
vpmulhw %ymm15, %ymm7, %ymm3
vpsraw $10, %ymm2, %ymm2
vpsraw $10, %ymm3, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm2, %ymm6, %ymm2
vpsubw %ymm3, %ymm7, %ymm3
vmovdqu %ymm0, 384(%rsi)
vmovdqu %ymm1, 416(%rsi)
vmovdqu %ymm2, 448(%rsi)
vmovdqu %ymm3, 480(%rsi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_decapsulate_avx2,.-kyber_decapsulate_avx2
#endif /* __APPLE__ */
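# kyber_csubq_avx2: conditional subtract of q from all 256 coefficients of the
# polynomial at (%rdi), in place. Per 16-bit lane: t = a - q;
# a = t + ((t >> 15) & q), i.e. q is added back only when t went negative.
# Example: a = 4000 -> t = 671, mask 0, result 671;
#          a = 2000 -> t = -1329, mask q = 3329, result 2000.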
#ifndef __APPLE__
.text
.globl kyber_csubq_avx2
.type kyber_csubq_avx2,@function
.align 16
kyber_csubq_avx2:
#else
.section __TEXT,__text
.globl _kyber_csubq_avx2
.p2align 4
_kyber_csubq_avx2:
#endif /* __APPLE__ */
vmovdqu kyber_q(%rip), %ymm12
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vmovdqu 192(%rdi), %ymm6
vmovdqu 224(%rdi), %ymm7
vpsubw %ymm12, %ymm0, %ymm8
vpsubw %ymm12, %ymm1, %ymm9
vpsubw %ymm12, %ymm2, %ymm10
vpsubw %ymm12, %ymm3, %ymm11
vpsraw $15, %ymm8, %ymm0
vpsraw $15, %ymm9, %ymm1
vpsraw $15, %ymm10, %ymm2
vpsraw $15, %ymm11, %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm10, %ymm2, %ymm2
vpaddw %ymm11, %ymm3, %ymm3
vpsubw %ymm12, %ymm4, %ymm8
vpsubw %ymm12, %ymm5, %ymm9
vpsubw %ymm12, %ymm6, %ymm10
vpsubw %ymm12, %ymm7, %ymm11
vpsraw $15, %ymm8, %ymm4
vpsraw $15, %ymm9, %ymm5
vpsraw $15, %ymm10, %ymm6
vpsraw $15, %ymm11, %ymm7
vpand %ymm12, %ymm4, %ymm4
vpand %ymm12, %ymm5, %ymm5
vpand %ymm12, %ymm6, %ymm6
vpand %ymm12, %ymm7, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
vpaddw %ymm10, %ymm6, %ymm6
vpaddw %ymm11, %ymm7, %ymm7
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 128(%rdi)
vmovdqu %ymm5, 160(%rdi)
vmovdqu %ymm6, 192(%rdi)
vmovdqu %ymm7, 224(%rdi)
vmovdqu 256(%rdi), %ymm0
vmovdqu 288(%rdi), %ymm1
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vmovdqu 448(%rdi), %ymm6
vmovdqu 480(%rdi), %ymm7
vpsubw %ymm12, %ymm0, %ymm8
vpsubw %ymm12, %ymm1, %ymm9
vpsubw %ymm12, %ymm2, %ymm10
vpsubw %ymm12, %ymm3, %ymm11
vpsraw $15, %ymm8, %ymm0
vpsraw $15, %ymm9, %ymm1
vpsraw $15, %ymm10, %ymm2
vpsraw $15, %ymm11, %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm10, %ymm2, %ymm2
vpaddw %ymm11, %ymm3, %ymm3
vpsubw %ymm12, %ymm4, %ymm8
vpsubw %ymm12, %ymm5, %ymm9
vpsubw %ymm12, %ymm6, %ymm10
vpsubw %ymm12, %ymm7, %ymm11
vpsraw $15, %ymm8, %ymm4
vpsraw $15, %ymm9, %ymm5
vpsraw $15, %ymm10, %ymm6
vpsraw $15, %ymm11, %ymm7
vpand %ymm12, %ymm4, %ymm4
vpand %ymm12, %ymm5, %ymm5
vpand %ymm12, %ymm6, %ymm6
vpand %ymm12, %ymm7, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
vpaddw %ymm10, %ymm6, %ymm6
vpaddw %ymm11, %ymm7, %ymm7
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm1, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vmovdqu %ymm4, 384(%rdi)
vmovdqu %ymm5, 416(%rdi)
vmovdqu %ymm6, 448(%rdi)
vmovdqu %ymm7, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_csubq_avx2,.-kyber_csubq_avx2
#endif /* __APPLE__ */
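# L_kyber_rej_idx: 256 eight-byte shuffle entries for left-compaction. Entry i
# lists, low byte first, the byte offsets (0, 2, ..., 14) of the 16-bit lanes
# whose bits are set in i; 0xff marks unused output positions.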
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_rej_idx:
.quad 0xffffffffffffffff,0xffffffffffffff00
.quad 0xffffffffffffff02,0xffffffffffff0200
.quad 0xffffffffffffff04,0xffffffffffff0400
.quad 0xffffffffffff0402,0xffffffffff040200
.quad 0xffffffffffffff06,0xffffffffffff0600
.quad 0xffffffffffff0602,0xffffffffff060200
.quad 0xffffffffffff0604,0xffffffffff060400
.quad 0xffffffffff060402,0xffffffff06040200
.quad 0xffffffffffffff08,0xffffffffffff0800
.quad 0xffffffffffff0802,0xffffffffff080200
.quad 0xffffffffffff0804,0xffffffffff080400
.quad 0xffffffffff080402,0xffffffff08040200
.quad 0xffffffffffff0806,0xffffffffff080600
.quad 0xffffffffff080602,0xffffffff08060200
.quad 0xffffffffff080604,0xffffffff08060400
.quad 0xffffffff08060402,0xffffff0806040200
.quad 0xffffffffffffff0a,0xffffffffffff0a00
.quad 0xffffffffffff0a02,0xffffffffff0a0200
.quad 0xffffffffffff0a04,0xffffffffff0a0400
.quad 0xffffffffff0a0402,0xffffffff0a040200
.quad 0xffffffffffff0a06,0xffffffffff0a0600
.quad 0xffffffffff0a0602,0xffffffff0a060200
.quad 0xffffffffff0a0604,0xffffffff0a060400
.quad 0xffffffff0a060402,0xffffff0a06040200
.quad 0xffffffffffff0a08,0xffffffffff0a0800
.quad 0xffffffffff0a0802,0xffffffff0a080200
.quad 0xffffffffff0a0804,0xffffffff0a080400
.quad 0xffffffff0a080402,0xffffff0a08040200
.quad 0xffffffffff0a0806,0xffffffff0a080600
.quad 0xffffffff0a080602,0xffffff0a08060200
.quad 0xffffffff0a080604,0xffffff0a08060400
.quad 0xffffff0a08060402,0xffff0a0806040200
.quad 0xffffffffffffff0c,0xffffffffffff0c00
.quad 0xffffffffffff0c02,0xffffffffff0c0200
.quad 0xffffffffffff0c04,0xffffffffff0c0400
.quad 0xffffffffff0c0402,0xffffffff0c040200
.quad 0xffffffffffff0c06,0xffffffffff0c0600
.quad 0xffffffffff0c0602,0xffffffff0c060200
.quad 0xffffffffff0c0604,0xffffffff0c060400
.quad 0xffffffff0c060402,0xffffff0c06040200
.quad 0xffffffffffff0c08,0xffffffffff0c0800
.quad 0xffffffffff0c0802,0xffffffff0c080200
.quad 0xffffffffff0c0804,0xffffffff0c080400
.quad 0xffffffff0c080402,0xffffff0c08040200
.quad 0xffffffffff0c0806,0xffffffff0c080600
.quad 0xffffffff0c080602,0xffffff0c08060200
.quad 0xffffffff0c080604,0xffffff0c08060400
.quad 0xffffff0c08060402,0xffff0c0806040200
.quad 0xffffffffffff0c0a,0xffffffffff0c0a00
.quad 0xffffffffff0c0a02,0xffffffff0c0a0200
.quad 0xffffffffff0c0a04,0xffffffff0c0a0400
.quad 0xffffffff0c0a0402,0xffffff0c0a040200
.quad 0xffffffffff0c0a06,0xffffffff0c0a0600
.quad 0xffffffff0c0a0602,0xffffff0c0a060200
.quad 0xffffffff0c0a0604,0xffffff0c0a060400
.quad 0xffffff0c0a060402,0xffff0c0a06040200
.quad 0xffffffffff0c0a08,0xffffffff0c0a0800
.quad 0xffffffff0c0a0802,0xffffff0c0a080200
.quad 0xffffffff0c0a0804,0xffffff0c0a080400
.quad 0xffffff0c0a080402,0xffff0c0a08040200
.quad 0xffffffff0c0a0806,0xffffff0c0a080600
.quad 0xffffff0c0a080602,0xffff0c0a08060200
.quad 0xffffff0c0a080604,0xffff0c0a08060400
.quad 0xffff0c0a08060402,0xff0c0a0806040200
.quad 0xffffffffffffff0e,0xffffffffffff0e00
.quad 0xffffffffffff0e02,0xffffffffff0e0200
.quad 0xffffffffffff0e04,0xffffffffff0e0400
.quad 0xffffffffff0e0402,0xffffffff0e040200
.quad 0xffffffffffff0e06,0xffffffffff0e0600
.quad 0xffffffffff0e0602,0xffffffff0e060200
.quad 0xffffffffff0e0604,0xffffffff0e060400
.quad 0xffffffff0e060402,0xffffff0e06040200
.quad 0xffffffffffff0e08,0xffffffffff0e0800
.quad 0xffffffffff0e0802,0xffffffff0e080200
.quad 0xffffffffff0e0804,0xffffffff0e080400
.quad 0xffffffff0e080402,0xffffff0e08040200
.quad 0xffffffffff0e0806,0xffffffff0e080600
.quad 0xffffffff0e080602,0xffffff0e08060200
.quad 0xffffffff0e080604,0xffffff0e08060400
.quad 0xffffff0e08060402,0xffff0e0806040200
.quad 0xffffffffffff0e0a,0xffffffffff0e0a00
.quad 0xffffffffff0e0a02,0xffffffff0e0a0200
.quad 0xffffffffff0e0a04,0xffffffff0e0a0400
.quad 0xffffffff0e0a0402,0xffffff0e0a040200
.quad 0xffffffffff0e0a06,0xffffffff0e0a0600
.quad 0xffffffff0e0a0602,0xffffff0e0a060200
.quad 0xffffffff0e0a0604,0xffffff0e0a060400
.quad 0xffffff0e0a060402,0xffff0e0a06040200
.quad 0xffffffffff0e0a08,0xffffffff0e0a0800
.quad 0xffffffff0e0a0802,0xffffff0e0a080200
.quad 0xffffffff0e0a0804,0xffffff0e0a080400
.quad 0xffffff0e0a080402,0xffff0e0a08040200
.quad 0xffffffff0e0a0806,0xffffff0e0a080600
.quad 0xffffff0e0a080602,0xffff0e0a08060200
.quad 0xffffff0e0a080604,0xffff0e0a08060400
.quad 0xffff0e0a08060402,0xff0e0a0806040200
.quad 0xffffffffffff0e0c,0xffffffffff0e0c00
.quad 0xffffffffff0e0c02,0xffffffff0e0c0200
.quad 0xffffffffff0e0c04,0xffffffff0e0c0400
.quad 0xffffffff0e0c0402,0xffffff0e0c040200
.quad 0xffffffffff0e0c06,0xffffffff0e0c0600
.quad 0xffffffff0e0c0602,0xffffff0e0c060200
.quad 0xffffffff0e0c0604,0xffffff0e0c060400
.quad 0xffffff0e0c060402,0xffff0e0c06040200
.quad 0xffffffffff0e0c08,0xffffffff0e0c0800
.quad 0xffffffff0e0c0802,0xffffff0e0c080200
.quad 0xffffffff0e0c0804,0xffffff0e0c080400
.quad 0xffffff0e0c080402,0xffff0e0c08040200
.quad 0xffffffff0e0c0806,0xffffff0e0c080600
.quad 0xffffff0e0c080602,0xffff0e0c08060200
.quad 0xffffff0e0c080604,0xffff0e0c08060400
.quad 0xffff0e0c08060402,0xff0e0c0806040200
.quad 0xffffffffff0e0c0a,0xffffffff0e0c0a00
.quad 0xffffffff0e0c0a02,0xffffff0e0c0a0200
.quad 0xffffffff0e0c0a04,0xffffff0e0c0a0400
.quad 0xffffff0e0c0a0402,0xffff0e0c0a040200
.quad 0xffffffff0e0c0a06,0xffffff0e0c0a0600
.quad 0xffffff0e0c0a0602,0xffff0e0c0a060200
.quad 0xffffff0e0c0a0604,0xffff0e0c0a060400
.quad 0xffff0e0c0a060402,0xff0e0c0a06040200
.quad 0xffffffff0e0c0a08,0xffffff0e0c0a0800
.quad 0xffffff0e0c0a0802,0xffff0e0c0a080200
.quad 0xffffff0e0c0a0804,0xffff0e0c0a080400
.quad 0xffff0e0c0a080402,0xff0e0c0a08040200
.quad 0xffffff0e0c0a0806,0xffff0e0c0a080600
.quad 0xffff0e0c0a080602,0xff0e0c0a08060200
.quad 0xffff0e0c0a080604,0xff0e0c0a08060400
.quad 0xff0e0c0a08060402,0xe0c0a0806040200
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_rej_q:
.quad 0xd010d010d010d01, 0xd010d010d010d01
.quad 0xd010d010d010d01, 0xd010d010d010d01
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_rej_ones:
.quad 0x101010101010101, 0x101010101010101
.quad 0x101010101010101, 0x101010101010101
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_rej_mask:
.quad 0xfff0fff0fff0fff, 0xfff0fff0fff0fff
.quad 0xfff0fff0fff0fff, 0xfff0fff0fff0fff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_rej_shuffle:
.quad 0x504040302010100, 0xb0a0a0908070706
.quad 0x908080706050504, 0xf0e0e0d0c0b0b0a
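# kyber_rej_uniform_n_avx2: rejection-sample coefficients uniform mod q.
# rdi = int16 output, esi = samples wanted, rdx = random bytes, rcx = bytes
# available; returns the number of samples written (eax).
# Constants: ymm6 = q = 0xd01 (3329), ymm7 = 0x01 bytes, ymm8 = 0x0fff lane
# mask, ymm9 = shuffle that expands 12-byte groups for 12->16 bit unpacking.
# Lanes with value < q pass vpcmpgtw and are compacted via L_kyber_rej_idx.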
#ifndef __APPLE__
.text
.globl kyber_rej_uniform_n_avx2
.type kyber_rej_uniform_n_avx2,@function
.align 16
kyber_rej_uniform_n_avx2:
#else
.section __TEXT,__text
.globl _kyber_rej_uniform_n_avx2
.p2align 4
_kyber_rej_uniform_n_avx2:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rcx, %r8
movl %esi, %eax
vmovdqu L_kyber_rej_q(%rip), %ymm6
vmovdqu L_kyber_rej_ones(%rip), %ymm7
vmovdqu L_kyber_rej_mask(%rip), %ymm8
vmovdqu L_kyber_rej_shuffle(%rip), %ymm9
leaq L_kyber_rej_idx(%rip), %r9
movq $0x1111111111111111, %r14
movq $0xe0c0a0806040200, %rbp
movq $0x101010101010101, %r13
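# Unrolled prologue: seven 48-byte blocks (0x150 bytes total) are consumed
# with no bounds checks, so this variant requires a sufficiently long stream.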
vpermq $0x94, (%rdx), %ymm0
vpermq $0x94, 24(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 48(%rdx), %ymm0
vpermq $0x94, 72(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 96(%rdx), %ymm0
vpermq $0x94, 120(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 144(%rdx), %ymm0
vpermq $0x94, 168(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 192(%rdx), %ymm0
vpermq $0x94, 216(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 240(%rdx), %ymm0
vpermq $0x94, 264(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
vpermq $0x94, 288(%rdx), %ymm0
vpermq $0x94, 312(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
addq $0x150, %rdx
subl $0x150, %r8d
L_kyber_rej_uniform_n_avx2_start_256:
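# Main loop: each iteration unpacks 48 input bytes into 32 12-bit candidates
# and runs while >= 48 bytes remain and >= 32 samples are still wanted.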
vpermq $0x94, (%rdx), %ymm0
vpermq $0x94, 24(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
addq $48, %rdx
subl $48, %r8d
cmpl $48, %r8d
jl L_kyber_rej_uniform_n_avx2_done_256
cmpl $32, %esi
jge L_kyber_rej_uniform_n_avx2_start_256
L_kyber_rej_uniform_n_avx2_done_256:
cmpl $8, %esi
jl L_kyber_rej_uniform_n_avx2_done_128
cmpl $12, %r8d
jl L_kyber_rej_uniform_n_avx2_done_128
L_kyber_rej_uniform_n_avx2_start_128:
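# 128-bit tail: 12 bytes -> 8 candidates per pass. pext with mask 0x5555
# compacts the byte-level vpmovmskb result to one acceptance bit per lane.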
vmovdqu (%rdx), %xmm0
vpshufb %xmm9, %xmm0, %xmm0
vpsrlw $4, %xmm0, %xmm2
vpblendw $0xaa, %xmm2, %xmm0, %xmm0
vpand %xmm8, %xmm0, %xmm0
vpcmpgtw %xmm0, %xmm6, %xmm2
vpmovmskb %xmm2, %rbx
movq $0x5555, %r10
pextl %r10d, %ebx, %ebx
movq (%r9,%rbx,8), %xmm3
vpaddb %xmm7, %xmm3, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm3
vpshufb %xmm3, %xmm0, %xmm0
vmovdqu %xmm0, (%rdi)
popcntl %ebx, %ecx
leaq (%rdi,%rcx,2), %rdi
subl %ecx, %esi
addq $12, %rdx
subl $12, %r8d
cmpl $12, %r8d
jl L_kyber_rej_uniform_n_avx2_done_128
cmpl $8, %esi
jge L_kyber_rej_uniform_n_avx2_start_128
L_kyber_rej_uniform_n_avx2_done_128:
cmpl $0x00, %r8d
je L_kyber_rej_uniform_n_avx2_done_64
cmpl $0x00, %esi
je L_kyber_rej_uniform_n_avx2_done_64
movq $0xfff0fff0fff0fff, %r15
movq $0x2000200020002000, %r10
movq $0xd010d010d010d01, %r11
movq $0x1000100010001000, %r12
L_kyber_rej_uniform_n_avx2_start_64:
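# Scalar tail: pdep spreads 6 bytes (four 12-bit values) across four 16-bit
# lanes of rcx; each lane is stored only if it is < q = 0xd01.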
movq (%rdx), %rcx
pdepq %r15, %rcx, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_0_avx2_rej_large_0
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_n_avx2_done_64
L_kyber_rej_uniform_0_avx2_rej_large_0:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_0_avx2_rej_large_1
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_n_avx2_done_64
L_kyber_rej_uniform_0_avx2_rej_large_1:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_0_avx2_rej_large_2
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_n_avx2_done_64
L_kyber_rej_uniform_0_avx2_rej_large_2:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_0_avx2_rej_large_3
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_n_avx2_done_64
L_kyber_rej_uniform_0_avx2_rej_large_3:
addq $6, %rdx
subl $6, %r8d
jle L_kyber_rej_uniform_n_avx2_done_64
cmpl $0x00, %esi
jg L_kyber_rej_uniform_n_avx2_start_64
L_kyber_rej_uniform_n_avx2_done_64:
vzeroupper
subl %esi, %eax
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size kyber_rej_uniform_n_avx2,.-kyber_rej_uniform_n_avx2
#endif /* __APPLE__ */
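# kyber_rej_uniform_avx2: same sampler as kyber_rej_uniform_n_avx2, but with
# remaining-count checks before every block, so any esi / rcx combination is
# safe; the 128-bit and scalar tails below mirror the _n variant.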
#ifndef __APPLE__
.text
.globl kyber_rej_uniform_avx2
.type kyber_rej_uniform_avx2,@function
.align 16
kyber_rej_uniform_avx2:
#else
.section __TEXT,__text
.globl _kyber_rej_uniform_avx2
.p2align 4
_kyber_rej_uniform_avx2:
#endif /* __APPLE__ */
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbp
movq %rcx, %r8
movl %esi, %eax
cmpl $0x00, %esi
je L_kyber_rej_uniform_avx2_done_64
cmpl $8, %esi
jl L_kyber_rej_uniform_avx2_done_128
vmovdqu L_kyber_rej_q(%rip), %ymm6
vmovdqu L_kyber_rej_ones(%rip), %ymm7
vmovdqu L_kyber_rej_mask(%rip), %ymm8
vmovdqu L_kyber_rej_shuffle(%rip), %ymm9
leaq L_kyber_rej_idx(%rip), %r9
movq $0x1111111111111111, %r14
movq $0xe0c0a0806040200, %rbp
movq $0x101010101010101, %r13
cmpl $32, %esi
jl L_kyber_rej_uniform_avx2_done_256
vpermq $0x94, (%rdx), %ymm0
vpermq $0x94, 24(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
addq $48, %rdx
subl $48, %r8d
cmpl $32, %esi
jl L_kyber_rej_uniform_avx2_done_256
vpermq $0x94, (%rdx), %ymm0
vpermq $0x94, 24(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
addq $48, %rdx
subl $48, %r8d
cmpl $32, %esi
jl L_kyber_rej_uniform_avx2_done_256
L_kyber_rej_uniform_avx2_start_256:
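# Main loop: one 48-byte block (32 candidates) per iteration, as above.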
vpermq $0x94, (%rdx), %ymm0
vpermq $0x94, 24(%rdx), %ymm1
vpshufb %ymm9, %ymm0, %ymm0
vpshufb %ymm9, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpblendw $0xaa, %ymm2, %ymm0, %ymm0
vpblendw $0xaa, %ymm3, %ymm1, %ymm1
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpcmpgtw %ymm0, %ymm6, %ymm2
vpcmpgtw %ymm1, %ymm6, %ymm3
vpacksswb %ymm3, %ymm2, %ymm2
vpmovmskb %ymm2, %rbx
movzbl %bl, %r10d
movzbl %bh, %ecx
movq %rbx, %r11
movq %rbx, %r12
shrq $16, %r11
shrq $24, %r12
andq $0xff, %r11
andq $0xff, %r12
movq (%r9,%r10,8), %xmm2
movq (%r9,%rcx,8), %xmm3
movq (%r9,%r11,8), %xmm4
movq (%r9,%r12,8), %xmm5
vinserti128 $0x01, %xmm4, %ymm2, %ymm2
vinserti128 $0x01, %xmm5, %ymm3, %ymm3
vpaddb %ymm7, %ymm2, %ymm4
vpaddb %ymm7, %ymm3, %ymm5
vpunpcklbw %ymm4, %ymm2, %ymm2
vpunpcklbw %ymm5, %ymm3, %ymm3
vpshufb %ymm2, %ymm0, %ymm0
vpshufb %ymm3, %ymm1, %ymm1
movq %rbx, %r10
movq %rbx, %r11
movq %rbx, %r12
andq $0xff, %rbx
shrq $16, %r10
shrq $8, %r11
shrq $24, %r12
andq $0xff, %r10
andq $0xff, %r11
popcntl %ebx, %ebx
popcntl %r10d, %r10d
popcntl %r11d, %r11d
popcntl %r12d, %r12d
vmovdqu %xmm0, (%rdi)
vextracti128 $0x01, %ymm0, %xmm0
leaq (%rdi,%rbx,2), %rdi
subl %ebx, %esi
vmovdqu %xmm0, (%rdi)
leaq (%rdi,%r10,2), %rdi
subl %r10d, %esi
vmovdqu %xmm1, (%rdi)
vextracti128 $0x01, %ymm1, %xmm1
leaq (%rdi,%r11,2), %rdi
subl %r11d, %esi
vmovdqu %xmm1, (%rdi)
leaq (%rdi,%r12,2), %rdi
subl %r12d, %esi
addq $48, %rdx
subl $48, %r8d
cmpl $48, %r8d
jl L_kyber_rej_uniform_avx2_done_256
cmpl $32, %esi
jge L_kyber_rej_uniform_avx2_start_256
L_kyber_rej_uniform_avx2_done_256:
cmpl $8, %esi
jl L_kyber_rej_uniform_avx2_done_128
cmpl $12, %r8d
jl L_kyber_rej_uniform_avx2_done_128
L_kyber_rej_uniform_avx2_start_128:
vmovdqu (%rdx), %xmm0
vpshufb %xmm9, %xmm0, %xmm0
vpsrlw $4, %xmm0, %xmm2
vpblendw $0xaa, %xmm2, %xmm0, %xmm0
vpand %xmm8, %xmm0, %xmm0
vpcmpgtw %xmm0, %xmm6, %xmm2
vpmovmskb %xmm2, %rbx
movq $0x5555, %r10
pextl %r10d, %ebx, %ebx
movq (%r9,%rbx,8), %xmm3
vpaddb %xmm7, %xmm3, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm3
vpshufb %xmm3, %xmm0, %xmm0
vmovdqu %xmm0, (%rdi)
popcntl %ebx, %ecx
leaq (%rdi,%rcx,2), %rdi
subl %ecx, %esi
addq $12, %rdx
subl $12, %r8d
cmpl $12, %r8d
jl L_kyber_rej_uniform_avx2_done_128
cmpl $8, %esi
jge L_kyber_rej_uniform_avx2_start_128
L_kyber_rej_uniform_avx2_done_128:
cmpl $0x00, %r8d
je L_kyber_rej_uniform_avx2_done_64
cmpl $0x00, %esi
je L_kyber_rej_uniform_avx2_done_64
movq $0xfff0fff0fff0fff, %r15
movq $0x2000200020002000, %r10
movq $0xd010d010d010d01, %r11
movq $0x1000100010001000, %r12
L_kyber_rej_uniform_avx2_start_64:
movq (%rdx), %rcx
pdepq %r15, %rcx, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_avx2_rej_large_0
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_avx2_done_64
L_kyber_rej_uniform_avx2_rej_large_0:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_avx2_rej_large_1
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_avx2_done_64
L_kyber_rej_uniform_avx2_rej_large_1:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_avx2_rej_large_2
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_avx2_done_64
L_kyber_rej_uniform_avx2_rej_large_2:
shrq $16, %rcx
cmpw $0xd01, %cx
jge L_kyber_rej_uniform_avx2_rej_large_3
movw %cx, (%rdi)
addq $2, %rdi
subl $0x01, %esi
je L_kyber_rej_uniform_avx2_done_64
L_kyber_rej_uniform_avx2_rej_large_3:
addq $6, %rdx
subl $6, %r8d
jle L_kyber_rej_uniform_avx2_done_64
cmpl $0x00, %esi
jg L_kyber_rej_uniform_avx2_start_64
L_kyber_rej_uniform_avx2_done_64:
vzeroupper
subl %esi, %eax
popq %rbp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
repz retq
#ifndef __APPLE__
.size kyber_rej_uniform_avx2,.-kyber_rej_uniform_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_249:
.quad 0x24924900249249, 0x24924900249249
.quad 0x24924900249249, 0x24924900249249
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_6db:
.quad 0x6db6db006db6db, 0x6db6db006db6db
.quad 0x6db6db006db6db, 0x6db6db006db6db
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_07:
.quad 0x700000007, 0x700000007
.quad 0x700000007, 0x700000007
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_70:
.quad 0x7000000070000, 0x7000000070000
.quad 0x7000000070000, 0x7000000070000
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_3:
.quad 0x3000300030003, 0x3000300030003
.quad 0x3000300030003, 0x3000300030003
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_shuff:
.quad 0xff050403ff020100, 0xff0b0a09ff080706
.quad 0xff090807ff060504, 0xff0f0e0dff0c0b0a
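# kyber_cbd_eta3_avx2: centered binomial distribution with eta = 3.
# 192 bytes of PRF output at (%rsi) become 256 int16 coefficients in [-3, 3]
# at (%rdi): each coefficient is a - b with a, b sums of three bits (mask
# 0x249249 selects every third bit). vpermq + vpshufb (L_kyber_shuff) widen
# 3-byte groups to 4 so the bit sums can be taken in 32-bit lanes.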
#ifndef __APPLE__
.text
.globl kyber_cbd_eta3_avx2
.type kyber_cbd_eta3_avx2,@function
.align 16
kyber_cbd_eta3_avx2:
#else
.section __TEXT,__text
.globl _kyber_cbd_eta3_avx2
.p2align 4
_kyber_cbd_eta3_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_mask_249(%rip), %ymm8
vmovdqu L_kyber_mask_6db(%rip), %ymm9
vmovdqu L_kyber_mask_07(%rip), %ymm10
vmovdqu L_kyber_mask_70(%rip), %ymm11
vmovdqu L_kyber_mask_3(%rip), %ymm12
vmovdqu L_kyber_shuff(%rip), %ymm13
vmovdqu (%rsi), %ymm0
vmovdqu 24(%rsi), %ymm1
vpermq $0x94, %ymm0, %ymm0
vpermq $0x94, %ymm1, %ymm1
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vpsrld $0x01, %ymm0, %ymm2
vpsrld $0x01, %ymm1, %ymm3
vpsrld $2, %ymm0, %ymm4
vpsrld $2, %ymm1, %ymm5
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpaddd %ymm2, %ymm0, %ymm0
vpaddd %ymm3, %ymm1, %ymm1
vpaddd %ymm4, %ymm0, %ymm0
vpaddd %ymm5, %ymm1, %ymm1
vpsrld $3, %ymm0, %ymm2
vpsrld $3, %ymm1, %ymm3
vpaddd %ymm9, %ymm0, %ymm0
vpaddd %ymm9, %ymm1, %ymm1
vpsubd %ymm2, %ymm0, %ymm0
vpsubd %ymm3, %ymm1, %ymm1
vpslld $10, %ymm0, %ymm2
vpslld $10, %ymm1, %ymm3
vpsrld $12, %ymm0, %ymm4
vpsrld $12, %ymm1, %ymm5
vpsrld $2, %ymm0, %ymm6
vpsrld $2, %ymm1, %ymm7
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm10, %ymm4, %ymm4
vpand %ymm10, %ymm5, %ymm5
vpand %ymm11, %ymm6, %ymm6
vpand %ymm11, %ymm7, %ymm7
vpaddw %ymm2, %ymm0, %ymm0
vpaddw %ymm3, %ymm1, %ymm1
vpaddw %ymm6, %ymm4, %ymm2
vpaddw %ymm7, %ymm5, %ymm3
vpsubw %ymm12, %ymm0, %ymm0
vpsubw %ymm12, %ymm1, %ymm1
vpsubw %ymm12, %ymm2, %ymm2
vpsubw %ymm12, %ymm3, %ymm3
vpunpckldq %ymm2, %ymm0, %ymm4
vpunpckldq %ymm3, %ymm1, %ymm5
vpunpckhdq %ymm2, %ymm0, %ymm6
vpunpckhdq %ymm3, %ymm1, %ymm7
vperm2i128 $32, %ymm6, %ymm4, %ymm0
vperm2i128 $32, %ymm7, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm4, %ymm2
vperm2i128 $49, %ymm7, %ymm5, %ymm3
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu 48(%rsi), %ymm0
vmovdqu 72(%rsi), %ymm1
vpermq $0x94, %ymm0, %ymm0
vpermq $0x94, %ymm1, %ymm1
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vpsrld $0x01, %ymm0, %ymm2
vpsrld $0x01, %ymm1, %ymm3
vpsrld $2, %ymm0, %ymm4
vpsrld $2, %ymm1, %ymm5
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpaddd %ymm2, %ymm0, %ymm0
vpaddd %ymm3, %ymm1, %ymm1
vpaddd %ymm4, %ymm0, %ymm0
vpaddd %ymm5, %ymm1, %ymm1
vpsrld $3, %ymm0, %ymm2
vpsrld $3, %ymm1, %ymm3
vpaddd %ymm9, %ymm0, %ymm0
vpaddd %ymm9, %ymm1, %ymm1
vpsubd %ymm2, %ymm0, %ymm0
vpsubd %ymm3, %ymm1, %ymm1
vpslld $10, %ymm0, %ymm2
vpslld $10, %ymm1, %ymm3
vpsrld $12, %ymm0, %ymm4
vpsrld $12, %ymm1, %ymm5
vpsrld $2, %ymm0, %ymm6
vpsrld $2, %ymm1, %ymm7
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm10, %ymm4, %ymm4
vpand %ymm10, %ymm5, %ymm5
vpand %ymm11, %ymm6, %ymm6
vpand %ymm11, %ymm7, %ymm7
vpaddw %ymm2, %ymm0, %ymm0
vpaddw %ymm3, %ymm1, %ymm1
vpaddw %ymm6, %ymm4, %ymm2
vpaddw %ymm7, %ymm5, %ymm3
vpsubw %ymm12, %ymm0, %ymm0
vpsubw %ymm12, %ymm1, %ymm1
vpsubw %ymm12, %ymm2, %ymm2
vpsubw %ymm12, %ymm3, %ymm3
vpunpckldq %ymm2, %ymm0, %ymm4
vpunpckldq %ymm3, %ymm1, %ymm5
vpunpckhdq %ymm2, %ymm0, %ymm6
vpunpckhdq %ymm3, %ymm1, %ymm7
vperm2i128 $32, %ymm6, %ymm4, %ymm0
vperm2i128 $32, %ymm7, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm4, %ymm2
vperm2i128 $49, %ymm7, %ymm5, %ymm3
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm2, 160(%rdi)
vmovdqu %ymm1, 192(%rdi)
vmovdqu %ymm3, 224(%rdi)
vmovdqu 96(%rsi), %ymm0
vmovdqu 120(%rsi), %ymm1
vpermq $0x94, %ymm0, %ymm0
vpermq $0x94, %ymm1, %ymm1
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vpsrld $0x01, %ymm0, %ymm2
vpsrld $0x01, %ymm1, %ymm3
vpsrld $2, %ymm0, %ymm4
vpsrld $2, %ymm1, %ymm5
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpaddd %ymm2, %ymm0, %ymm0
vpaddd %ymm3, %ymm1, %ymm1
vpaddd %ymm4, %ymm0, %ymm0
vpaddd %ymm5, %ymm1, %ymm1
vpsrld $3, %ymm0, %ymm2
vpsrld $3, %ymm1, %ymm3
vpaddd %ymm9, %ymm0, %ymm0
vpaddd %ymm9, %ymm1, %ymm1
vpsubd %ymm2, %ymm0, %ymm0
vpsubd %ymm3, %ymm1, %ymm1
vpslld $10, %ymm0, %ymm2
vpslld $10, %ymm1, %ymm3
vpsrld $12, %ymm0, %ymm4
vpsrld $12, %ymm1, %ymm5
vpsrld $2, %ymm0, %ymm6
vpsrld $2, %ymm1, %ymm7
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm10, %ymm4, %ymm4
vpand %ymm10, %ymm5, %ymm5
vpand %ymm11, %ymm6, %ymm6
vpand %ymm11, %ymm7, %ymm7
vpaddw %ymm2, %ymm0, %ymm0
vpaddw %ymm3, %ymm1, %ymm1
vpaddw %ymm6, %ymm4, %ymm2
vpaddw %ymm7, %ymm5, %ymm3
vpsubw %ymm12, %ymm0, %ymm0
vpsubw %ymm12, %ymm1, %ymm1
vpsubw %ymm12, %ymm2, %ymm2
vpsubw %ymm12, %ymm3, %ymm3
vpunpckldq %ymm2, %ymm0, %ymm4
vpunpckldq %ymm3, %ymm1, %ymm5
vpunpckhdq %ymm2, %ymm0, %ymm6
vpunpckhdq %ymm3, %ymm1, %ymm7
vperm2i128 $32, %ymm6, %ymm4, %ymm0
vperm2i128 $32, %ymm7, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm4, %ymm2
vperm2i128 $49, %ymm7, %ymm5, %ymm3
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm2, 288(%rdi)
vmovdqu %ymm1, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vmovdqu 144(%rsi), %ymm0
vmovdqu 168(%rsi), %ymm1
vpermq $0x94, %ymm0, %ymm0
vpermq $0x94, %ymm1, %ymm1
vpshufb %ymm13, %ymm0, %ymm0
vpshufb %ymm13, %ymm1, %ymm1
vpsrld $0x01, %ymm0, %ymm2
vpsrld $0x01, %ymm1, %ymm3
vpsrld $2, %ymm0, %ymm4
vpsrld $2, %ymm1, %ymm5
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpaddd %ymm2, %ymm0, %ymm0
vpaddd %ymm3, %ymm1, %ymm1
vpaddd %ymm4, %ymm0, %ymm0
vpaddd %ymm5, %ymm1, %ymm1
vpsrld $3, %ymm0, %ymm2
vpsrld $3, %ymm1, %ymm3
vpaddd %ymm9, %ymm0, %ymm0
vpaddd %ymm9, %ymm1, %ymm1
vpsubd %ymm2, %ymm0, %ymm0
vpsubd %ymm3, %ymm1, %ymm1
vpslld $10, %ymm0, %ymm2
vpslld $10, %ymm1, %ymm3
vpsrld $12, %ymm0, %ymm4
vpsrld $12, %ymm1, %ymm5
vpsrld $2, %ymm0, %ymm6
vpsrld $2, %ymm1, %ymm7
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm10, %ymm4, %ymm4
vpand %ymm10, %ymm5, %ymm5
vpand %ymm11, %ymm6, %ymm6
vpand %ymm11, %ymm7, %ymm7
vpaddw %ymm2, %ymm0, %ymm0
vpaddw %ymm3, %ymm1, %ymm1
vpaddw %ymm6, %ymm4, %ymm2
vpaddw %ymm7, %ymm5, %ymm3
vpsubw %ymm12, %ymm0, %ymm0
vpsubw %ymm12, %ymm1, %ymm1
vpsubw %ymm12, %ymm2, %ymm2
vpsubw %ymm12, %ymm3, %ymm3
vpunpckldq %ymm2, %ymm0, %ymm4
vpunpckldq %ymm3, %ymm1, %ymm5
vpunpckhdq %ymm2, %ymm0, %ymm6
vpunpckhdq %ymm3, %ymm1, %ymm7
vperm2i128 $32, %ymm6, %ymm4, %ymm0
vperm2i128 $32, %ymm7, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm4, %ymm2
vperm2i128 $49, %ymm7, %ymm5, %ymm3
vmovdqu %ymm0, 384(%rdi)
vmovdqu %ymm2, 416(%rdi)
vmovdqu %ymm1, 448(%rdi)
vmovdqu %ymm3, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_cbd_eta3_avx2,.-kyber_cbd_eta3_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_55:
.quad 0x5555555555555555, 0x5555555555555555
.quad 0x5555555555555555, 0x5555555555555555
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_33:
.quad 0x3333333333333333, 0x3333333333333333
.quad 0x3333333333333333, 0x3333333333333333
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_03:
.quad 0x303030303030303, 0x303030303030303
.quad 0x303030303030303, 0x303030303030303
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_mask_0f:
.quad 0xf0f0f0f0f0f0f0f, 0xf0f0f0f0f0f0f0f
.quad 0xf0f0f0f0f0f0f0f, 0xf0f0f0f0f0f0f0f
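# kyber_cbd_eta2_avx2: centered binomial distribution with eta = 2.
# 128 bytes of PRF output at (%rsi) become 256 int16 coefficients in [-2, 2]
# at (%rdi): bit-pair sums via mask 0x55, nibble-wise a - b computed with a
# +3 offset (mask 0x33) to stay unsigned, then the offset is removed and the
# byte results are sign-extended to 16 bits (vpmovsxbw).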
#ifndef __APPLE__
.text
.globl kyber_cbd_eta2_avx2
.type kyber_cbd_eta2_avx2,@function
.align 16
kyber_cbd_eta2_avx2:
#else
.section __TEXT,__text
.globl _kyber_cbd_eta2_avx2
.p2align 4
_kyber_cbd_eta2_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_mask_55(%rip), %ymm8
vmovdqu L_kyber_mask_33(%rip), %ymm9
vmovdqu L_kyber_mask_03(%rip), %ymm10
vmovdqu L_kyber_mask_0f(%rip), %ymm11
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vpsrlw $0x01, %ymm0, %ymm2
vpsrlw $0x01, %ymm1, %ymm3
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpaddb %ymm2, %ymm0, %ymm0
vpaddb %ymm3, %ymm1, %ymm1
vpsrlw $2, %ymm0, %ymm2
vpsrlw $2, %ymm1, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpaddb %ymm9, %ymm0, %ymm0
vpaddb %ymm9, %ymm1, %ymm1
vpsubb %ymm2, %ymm0, %ymm0
vpsubb %ymm3, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpand %ymm11, %ymm0, %ymm0
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpsubb %ymm10, %ymm0, %ymm0
vpsubb %ymm10, %ymm1, %ymm1
vpsubb %ymm10, %ymm2, %ymm2
vpsubb %ymm10, %ymm3, %ymm3
vpunpcklbw %ymm2, %ymm0, %ymm4
vpunpcklbw %ymm3, %ymm1, %ymm5
vpunpckhbw %ymm2, %ymm0, %ymm6
vpunpckhbw %ymm3, %ymm1, %ymm7
vpmovsxbw %xmm4, %ymm0
vpmovsxbw %xmm5, %ymm1
vextracti128 $0x01, %ymm4, %xmm2
vextracti128 $0x01, %ymm5, %xmm3
vpmovsxbw %xmm2, %ymm2
vpmovsxbw %xmm3, %ymm3
vpmovsxbw %xmm6, %ymm4
vpmovsxbw %xmm7, %ymm5
vextracti128 $0x01, %ymm6, %xmm6
vextracti128 $0x01, %ymm7, %xmm7
vpmovsxbw %xmm6, %ymm6
vpmovsxbw %xmm7, %ymm7
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm4, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm6, 96(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm5, 160(%rdi)
vmovdqu %ymm3, 192(%rdi)
vmovdqu %ymm7, 224(%rdi)
vmovdqu 64(%rsi), %ymm0
vmovdqu 96(%rsi), %ymm1
vpsrlw $0x01, %ymm0, %ymm2
vpsrlw $0x01, %ymm1, %ymm3
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpaddb %ymm2, %ymm0, %ymm0
vpaddb %ymm3, %ymm1, %ymm1
vpsrlw $2, %ymm0, %ymm2
vpsrlw $2, %ymm1, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpaddb %ymm9, %ymm0, %ymm0
vpaddb %ymm9, %ymm1, %ymm1
vpsubb %ymm2, %ymm0, %ymm0
vpsubb %ymm3, %ymm1, %ymm1
vpsrlw $4, %ymm0, %ymm2
vpsrlw $4, %ymm1, %ymm3
vpand %ymm11, %ymm0, %ymm0
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpsubb %ymm10, %ymm0, %ymm0
vpsubb %ymm10, %ymm1, %ymm1
vpsubb %ymm10, %ymm2, %ymm2
vpsubb %ymm10, %ymm3, %ymm3
vpunpcklbw %ymm2, %ymm0, %ymm4
vpunpcklbw %ymm3, %ymm1, %ymm5
vpunpckhbw %ymm2, %ymm0, %ymm6
vpunpckhbw %ymm3, %ymm1, %ymm7
vpmovsxbw %xmm4, %ymm0
vpmovsxbw %xmm5, %ymm1
vextracti128 $0x01, %ymm4, %xmm2
vextracti128 $0x01, %ymm5, %xmm3
vpmovsxbw %xmm2, %ymm2
vpmovsxbw %xmm3, %ymm3
vpmovsxbw %xmm6, %ymm4
vpmovsxbw %xmm7, %ymm5
vextracti128 $0x01, %ymm6, %xmm6
vextracti128 $0x01, %ymm7, %xmm7
vpmovsxbw %xmm6, %ymm6
vpmovsxbw %xmm7, %ymm7
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm4, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm6, 352(%rdi)
vmovdqu %ymm1, 384(%rdi)
vmovdqu %ymm5, 416(%rdi)
vmovdqu %ymm3, 448(%rdi)
vmovdqu %ymm7, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_cbd_eta2_avx2,.-kyber_cbd_eta2_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_mask:
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
.value 0x3ff,0x3ff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_shift:
.quad 0x400000104000001, 0x400000104000001
.quad 0x400000104000001, 0x400000104000001
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_shlv:
.quad 0xc, 0xc
.quad 0xc, 0xc
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_shuf:
.value 0x100,0x302
.value 0x804,0xa09
.value 0xc0b,0xffff
.value 0xffff,0xffff
.value 0xa09,0xc0b
.value 0xffff,0xffff
.value 0xffff,0x100
.value 0x302,0x804
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_v:
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_offset:
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_10_avx2_shift12:
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
.value 0x1000,0x1000
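# kyber_compress_10_avx2: compress each coefficient to 10 bits,
# d = round(2^10 * x / q) mod 2^10, then pack four coefficients into five
# bytes. ymm6 = 0x4ebf = 20159 (~2^26 / q) drives the rounded division by q.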
#ifndef __APPLE__
.text
.globl kyber_compress_10_avx2
.type kyber_compress_10_avx2,@function
.align 16
kyber_compress_10_avx2:
#else
.section __TEXT,__text
.globl _kyber_compress_10_avx2
.p2align 4
_kyber_compress_10_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_compress_10_avx2_mask(%rip), %ymm9
vmovdqu L_kyber_compress_10_avx2_shift(%rip), %ymm8
vmovdqu L_kyber_compress_10_avx2_shlv(%rip), %ymm10
vmovdqu L_kyber_compress_10_avx2_shuf(%rip), %ymm11
vmovdqu L_kyber_compress_10_avx2_v(%rip), %ymm6
vmovdqu L_kyber_compress_10_avx2_offset(%rip), %ymm12
vmovdqu L_kyber_compress_10_avx2_shift12(%rip), %ymm13
vpsllw $3, %ymm6, %ymm7
L_kyber_compress_10_avx2_start:
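# One loop iteration compresses a full polynomial: each block below rounds
# 32 coefficients (two ymm loads), packs them 10 bits apiece via
# vpmaddwd/vpsllvd/vpsrlq/vpshufb, and stores two 20-byte groups
# (16 coefficients x 10 bits = 20 bytes).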
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, (%rdi)
vmovdqu %xmm1, 20(%rdi)
vmovss %xmm2, 16(%rdi)
vmovss %xmm4, 36(%rdi)
vmovdqu 64(%rsi), %ymm0
vmovdqu 96(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 40(%rdi)
vmovdqu %xmm1, 60(%rdi)
vmovss %xmm2, 56(%rdi)
vmovss %xmm4, 76(%rdi)
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 80(%rdi)
vmovdqu %xmm1, 100(%rdi)
vmovss %xmm2, 96(%rdi)
vmovss %xmm4, 116(%rdi)
vmovdqu 192(%rsi), %ymm0
vmovdqu 224(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 120(%rdi)
vmovdqu %xmm1, 140(%rdi)
vmovss %xmm2, 136(%rdi)
vmovss %xmm4, 156(%rdi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 160(%rdi)
vmovdqu %xmm1, 180(%rdi)
vmovss %xmm2, 176(%rdi)
vmovss %xmm4, 196(%rdi)
vmovdqu 320(%rsi), %ymm0
vmovdqu 352(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 200(%rdi)
vmovdqu %xmm1, 220(%rdi)
vmovss %xmm2, 216(%rdi)
vmovss %xmm4, 236(%rdi)
vmovdqu 384(%rsi), %ymm0
vmovdqu 416(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 240(%rdi)
vmovdqu %xmm1, 260(%rdi)
vmovss %xmm2, 256(%rdi)
vmovss %xmm4, 276(%rdi)
vmovdqu 448(%rsi), %ymm0
vmovdqu 480(%rsi), %ymm1
vpmullw %ymm7, %ymm0, %ymm2
vpmullw %ymm7, %ymm1, %ymm4
vpaddw %ymm12, %ymm0, %ymm3
vpaddw %ymm12, %ymm1, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm1, %ymm1
vpmulhuw %ymm6, %ymm0, %ymm0
vpmulhuw %ymm6, %ymm1, %ymm1
vpsubw %ymm3, %ymm2, %ymm3
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm3, %ymm2, %ymm2
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm2, %ymm2
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm2, %ymm0, %ymm0
vpsubw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm13, %ymm0, %ymm0
vpmulhrsw %ymm13, %ymm1, %ymm1
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpmaddwd %ymm8, %ymm0, %ymm0
vpmaddwd %ymm8, %ymm1, %ymm1
vpsllvd %ymm10, %ymm0, %ymm0
vpsllvd %ymm10, %ymm1, %ymm1
vpsrlq $12, %ymm0, %ymm0
vpsrlq $12, %ymm1, %ymm1
vpshufb %ymm11, %ymm0, %ymm0
vpshufb %ymm11, %ymm1, %ymm1
vextracti128 $0x01, %ymm0, %xmm2
vextracti128 $0x01, %ymm1, %xmm4
vpblendw $0xe0, %xmm2, %xmm0, %xmm0
vpblendw $0xe0, %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 280(%rdi)
vmovdqu %xmm1, 300(%rdi)
vmovss %xmm2, 296(%rdi)
vmovss %xmm4, 316(%rdi)
addq $0x140, %rdi
addq $0x200, %rsi
subl $0x01, %edx
jg L_kyber_compress_10_avx2_start
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_compress_10_avx2,.-kyber_compress_10_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_decompress_10_avx2_mask:
.long 0x7fe01ff8,0x7fe01ff8,0x7fe01ff8,0x7fe01ff8
.long 0x7fe01ff8,0x7fe01ff8,0x7fe01ff8,0x7fe01ff8
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_decompress_10_avx2_sllv:
.quad 0x4, 0x4
.quad 0x4, 0x4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_decompress_10_avx2_q:
.long 0xd013404,0xd013404,0xd013404,0xd013404
.long 0xd013404,0xd013404,0xd013404,0xd013404
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_10_avx2_shuf:
.value 0x100,0x201
.value 0x302,0x403
.value 0x605,0x706
.value 0x807,0x908
.value 0x302,0x403
.value 0x504,0x605
.value 0x807,0x908
.value 0xa09,0xb0a
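# kyber_decompress_10_avx2 -- Kyber Decompress_d with d = 10: expands 320
# packed bytes per polynomial at %rsi into 256 16-bit coefficients at %rdi
# (c -> round(q * c / 2^10), q = 3329); %edx holds the polynomial count.
# The 32-byte vpermq loads read a little past each 320-byte block, so the
# packed input is assumed to be readable slightly beyond its end.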
#ifndef __APPLE__
.text
.globl kyber_decompress_10_avx2
.type kyber_decompress_10_avx2,@function
.align 16
kyber_decompress_10_avx2:
#else
.section __TEXT,__text
.globl _kyber_decompress_10_avx2
.p2align 4
_kyber_decompress_10_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_decompress_10_avx2_mask(%rip), %ymm4
vmovdqu L_kyber_decompress_10_avx2_q(%rip), %ymm5
vmovdqu L_kyber_decompress_10_avx2_shuf(%rip), %ymm6
vmovdqu L_kyber_decompress_10_avx2_sllv(%rip), %ymm7
L_kyber_decompress_10_avx2_start:
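# One iteration decompresses one polynomial: 320 bytes in, 512 bytes out.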
vpermq $0x94, (%rsi), %ymm0
vpermq $0x94, 20(%rsi), %ymm1
vpermq $0x94, 40(%rsi), %ymm2
vpermq $0x94, 60(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpsllvd %ymm7, %ymm0, %ymm0
vpsllvd %ymm7, %ymm1, %ymm1
vpsllvd %ymm7, %ymm2, %ymm2
vpsllvd %ymm7, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm5, %ymm0, %ymm0
vpmulhrsw %ymm5, %ymm1, %ymm1
vpmulhrsw %ymm5, %ymm2, %ymm2
vpmulhrsw %ymm5, %ymm3, %ymm3
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vpermq $0x94, 80(%rsi), %ymm0
vpermq $0x94, 100(%rsi), %ymm1
vpermq $0x94, 120(%rsi), %ymm2
vpermq $0x94, 140(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpsllvd %ymm7, %ymm0, %ymm0
vpsllvd %ymm7, %ymm1, %ymm1
vpsllvd %ymm7, %ymm2, %ymm2
vpsllvd %ymm7, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm5, %ymm0, %ymm0
vpmulhrsw %ymm5, %ymm1, %ymm1
vpmulhrsw %ymm5, %ymm2, %ymm2
vpmulhrsw %ymm5, %ymm3, %ymm3
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 160(%rdi)
vmovdqu %ymm2, 192(%rdi)
vmovdqu %ymm3, 224(%rdi)
vpermq $0x94, 160(%rsi), %ymm0
vpermq $0x94, 180(%rsi), %ymm1
vpermq $0x94, 200(%rsi), %ymm2
vpermq $0x94, 220(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpsllvd %ymm7, %ymm0, %ymm0
vpsllvd %ymm7, %ymm1, %ymm1
vpsllvd %ymm7, %ymm2, %ymm2
vpsllvd %ymm7, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm5, %ymm0, %ymm0
vpmulhrsw %ymm5, %ymm1, %ymm1
vpmulhrsw %ymm5, %ymm2, %ymm2
vpmulhrsw %ymm5, %ymm3, %ymm3
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm1, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vpermq $0x94, 240(%rsi), %ymm0
vpermq $0x94, 260(%rsi), %ymm1
vpermq $0x94, 280(%rsi), %ymm2
vpermq $0x94, 300(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpsllvd %ymm7, %ymm0, %ymm0
vpsllvd %ymm7, %ymm1, %ymm1
vpsllvd %ymm7, %ymm2, %ymm2
vpsllvd %ymm7, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm5, %ymm0, %ymm0
vpmulhrsw %ymm5, %ymm1, %ymm1
vpmulhrsw %ymm5, %ymm2, %ymm2
vpmulhrsw %ymm5, %ymm3, %ymm3
vmovdqu %ymm0, 384(%rdi)
vmovdqu %ymm1, 416(%rdi)
vmovdqu %ymm2, 448(%rdi)
vmovdqu %ymm3, 480(%rdi)
addq $0x140, %rsi
addq $0x200, %rdi
subl $0x01, %edx
jg L_kyber_decompress_10_avx2_start
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_decompress_10_avx2,.-kyber_decompress_10_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_v:
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_off:
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
.value 0x24,0x24
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_shift13:
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
.value 0x2000,0x2000
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_mask:
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
.value 0x7ff,0x7ff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_shift:
.quad 0x800000108000001, 0x800000108000001
.quad 0x800000108000001, 0x800000108000001
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_sllvd:
.long 0xa,0x0,0xa,0x0
.long 0xa,0x0,0xa,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_srlvq:
.quad 0xa, 0x1e
.quad 0xa, 0x1e
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_11_avx2_shuf:
.value 0x100,0x302
.value 0x504,0x706
.value 0x908,0xff0a
.value 0xffff,0xffff
.value 0x605,0x807
.value 0xa09,0xffff
.value 0xffff,0x0
.value 0x201,0x403
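# kyber_compress_11_avx2 -- Kyber Compress_d with d = 11: rounds each
# coefficient to 11 bits and packs 256 coefficients into 352 bytes at %rdi,
# reading 512 bytes per polynomial from %rsi; %edx holds the polynomial
# count. The 16-byte and 8-byte stores overlap, and the final vmovq of each
# polynomial lands 2 bytes past the 352-byte mark; all but the last are
# overwritten by the next iteration.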
#ifndef __APPLE__
.text
.globl kyber_compress_11_avx2
.type kyber_compress_11_avx2,@function
.align 16
kyber_compress_11_avx2:
#else
.section __TEXT,__text
.globl _kyber_compress_11_avx2
.p2align 4
_kyber_compress_11_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_compress_11_avx2_v(%rip), %ymm7
vmovdqu L_kyber_compress_11_avx2_off(%rip), %ymm8
vmovdqu L_kyber_compress_11_avx2_shift13(%rip), %ymm9
vmovdqu L_kyber_compress_11_avx2_mask(%rip), %ymm10
vmovdqu L_kyber_compress_11_avx2_shift(%rip), %ymm11
vmovdqu L_kyber_compress_11_avx2_sllvd(%rip), %ymm12
vmovdqu L_kyber_compress_11_avx2_srlvq(%rip), %ymm13
vmovdqu L_kyber_compress_11_avx2_shuf(%rip), %ymm14
vpsllw $3, %ymm7, %ymm6
L_kyber_compress_11_avx2_start:
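# One iteration compresses one polynomial: 512 bytes in, 352 (0x160) bytes
# out, packed as 44-byte groups of 32 coefficients.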
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, (%rdi)
vmovq %xmm1, 16(%rdi)
vmovdqu %xmm3, 22(%rdi)
vmovq %xmm4, 38(%rdi)
vmovdqu 64(%rsi), %ymm0
vmovdqu 96(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 44(%rdi)
vmovq %xmm1, 60(%rdi)
vmovdqu %xmm3, 66(%rdi)
vmovq %xmm4, 82(%rdi)
vmovdqu 128(%rsi), %ymm0
vmovdqu 160(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 88(%rdi)
vmovq %xmm1, 104(%rdi)
vmovdqu %xmm3, 110(%rdi)
vmovq %xmm4, 126(%rdi)
vmovdqu 192(%rsi), %ymm0
vmovdqu 224(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 132(%rdi)
vmovq %xmm1, 148(%rdi)
vmovdqu %xmm3, 154(%rdi)
vmovq %xmm4, 170(%rdi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 176(%rdi)
vmovq %xmm1, 192(%rdi)
vmovdqu %xmm3, 198(%rdi)
vmovq %xmm4, 214(%rdi)
vmovdqu 320(%rsi), %ymm0
vmovdqu 352(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 220(%rdi)
vmovq %xmm1, 236(%rdi)
vmovdqu %xmm3, 242(%rdi)
vmovq %xmm4, 258(%rdi)
vmovdqu 384(%rsi), %ymm0
vmovdqu 416(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 264(%rdi)
vmovq %xmm1, 280(%rdi)
vmovdqu %xmm3, 286(%rdi)
vmovq %xmm4, 302(%rdi)
vmovdqu 448(%rsi), %ymm0
vmovdqu 480(%rsi), %ymm3
vpmullw %ymm6, %ymm0, %ymm1
vpmullw %ymm6, %ymm3, %ymm4
vpaddw %ymm8, %ymm0, %ymm2
vpaddw %ymm8, %ymm3, %ymm5
vpsllw $3, %ymm0, %ymm0
vpsllw $3, %ymm3, %ymm3
vpmulhw %ymm7, %ymm0, %ymm0
vpmulhw %ymm7, %ymm3, %ymm3
vpsubw %ymm2, %ymm1, %ymm2
vpsubw %ymm5, %ymm4, %ymm5
vpandn %ymm2, %ymm1, %ymm1
vpandn %ymm5, %ymm4, %ymm4
vpsrlw $15, %ymm1, %ymm1
vpsrlw $15, %ymm4, %ymm4
vpsubw %ymm1, %ymm0, %ymm0
vpsubw %ymm4, %ymm3, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm10, %ymm0, %ymm0
vpand %ymm10, %ymm3, %ymm3
vpmaddwd %ymm11, %ymm0, %ymm0
vpmaddwd %ymm11, %ymm3, %ymm3
vpsllvd %ymm12, %ymm0, %ymm0
vpsllvd %ymm12, %ymm3, %ymm3
vpsrldq $8, %ymm0, %ymm1
vpsrldq $8, %ymm3, %ymm4
vpsrlvq %ymm13, %ymm0, %ymm0
vpsrlvq %ymm13, %ymm3, %ymm3
vpsllq $34, %ymm1, %ymm1
vpsllq $34, %ymm4, %ymm4
vpaddq %ymm1, %ymm0, %ymm0
vpaddq %ymm4, %ymm3, %ymm3
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm3, %ymm3
vextracti128 $0x01, %ymm0, %xmm1
vextracti128 $0x01, %ymm3, %xmm4
vpblendvb %xmm14, %xmm1, %xmm0, %xmm0
vpblendvb %xmm14, %xmm4, %xmm3, %xmm3
vmovdqu %xmm0, 308(%rdi)
vmovq %xmm1, 324(%rdi)
vmovdqu %xmm3, 330(%rdi)
vmovq %xmm4, 346(%rdi)
addq $0x160, %rdi
addq $0x200, %rsi
subl $0x01, %edx
jg L_kyber_compress_11_avx2_start
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_compress_11_avx2,.-kyber_compress_11_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_q:
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_shuf:
.value 0x100,0x201
.value 0x302,0x504
.value 0x605,0x706
.value 0x908,0xa09
.value 0x403,0x504
.value 0x605,0x807
.value 0x908,0xa09
.value 0xc0b,0xd0c
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_sllv:
.long 0x0,0x1,0x0,0x0
.long 0x0,0x1,0x0,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_srlv:
.quad 0x0, 0x2
.quad 0x0, 0x2
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_shift:
.value 0x20,0x4
.value 0x1,0x20
.value 0x8,0x1
.value 0x20,0x4
.value 0x20,0x4
.value 0x1,0x20
.value 0x8,0x1
.value 0x20,0x4
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_11_avx2_mask:
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
.value 0x7ff0,0x7ff0
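# kyber_decompress_11_avx2 -- Kyber Decompress_d with d = 11: expands 352
# packed bytes per polynomial at %rsi into 256 coefficients at %rdi
# (c -> round(q * c / 2^11), q = 3329); %edx holds the polynomial count.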
#ifndef __APPLE__
.text
.globl kyber_decompress_11_avx2
.type kyber_decompress_11_avx2,@function
.align 16
kyber_decompress_11_avx2:
#else
.section __TEXT,__text
.globl _kyber_decompress_11_avx2
.p2align 4
_kyber_decompress_11_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_decompress_11_avx2_q(%rip), %ymm4
vmovdqu L_kyber_decompress_11_avx2_shuf(%rip), %ymm5
vmovdqu L_kyber_decompress_11_avx2_sllv(%rip), %ymm6
vmovdqu L_kyber_decompress_11_avx2_srlv(%rip), %ymm7
vmovdqu L_kyber_decompress_11_avx2_shift(%rip), %ymm8
vmovdqu L_kyber_decompress_11_avx2_mask(%rip), %ymm9
L_kyber_decompress_11_avx2_start:
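# One iteration decompresses one polynomial: 352 bytes in, 512 bytes out.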
vpermq $0x94, (%rsi), %ymm0
vpermq $0x94, 22(%rsi), %ymm1
vpermq $0x94, 44(%rsi), %ymm2
vpermq $0x94, 66(%rsi), %ymm3
vpshufb %ymm5, %ymm0, %ymm0
vpshufb %ymm5, %ymm1, %ymm1
vpshufb %ymm5, %ymm2, %ymm2
vpshufb %ymm5, %ymm3, %ymm3
vpsrlvd %ymm6, %ymm0, %ymm0
vpsrlvd %ymm6, %ymm1, %ymm1
vpsrlvd %ymm6, %ymm2, %ymm2
vpsrlvd %ymm6, %ymm3, %ymm3
vpsrlvq %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm1, %ymm1
vpsrlvq %ymm7, %ymm2, %ymm2
vpsrlvq %ymm7, %ymm3, %ymm3
vpmullw %ymm8, %ymm0, %ymm0
vpmullw %ymm8, %ymm1, %ymm1
vpmullw %ymm8, %ymm2, %ymm2
vpmullw %ymm8, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpmulhrsw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm4, %ymm2, %ymm2
vpmulhrsw %ymm4, %ymm3, %ymm3
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vpermq $0x94, 88(%rsi), %ymm0
vpermq $0x94, 110(%rsi), %ymm1
vpermq $0x94, 132(%rsi), %ymm2
vpermq $0x94, 154(%rsi), %ymm3
vpshufb %ymm5, %ymm0, %ymm0
vpshufb %ymm5, %ymm1, %ymm1
vpshufb %ymm5, %ymm2, %ymm2
vpshufb %ymm5, %ymm3, %ymm3
vpsrlvd %ymm6, %ymm0, %ymm0
vpsrlvd %ymm6, %ymm1, %ymm1
vpsrlvd %ymm6, %ymm2, %ymm2
vpsrlvd %ymm6, %ymm3, %ymm3
vpsrlvq %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm1, %ymm1
vpsrlvq %ymm7, %ymm2, %ymm2
vpsrlvq %ymm7, %ymm3, %ymm3
vpmullw %ymm8, %ymm0, %ymm0
vpmullw %ymm8, %ymm1, %ymm1
vpmullw %ymm8, %ymm2, %ymm2
vpmullw %ymm8, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpmulhrsw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm4, %ymm2, %ymm2
vpmulhrsw %ymm4, %ymm3, %ymm3
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 160(%rdi)
vmovdqu %ymm2, 192(%rdi)
vmovdqu %ymm3, 224(%rdi)
vpermq $0x94, 176(%rsi), %ymm0
vpermq $0x94, 198(%rsi), %ymm1
vpermq $0x94, 220(%rsi), %ymm2
vpermq $0x94, 242(%rsi), %ymm3
vpshufb %ymm5, %ymm0, %ymm0
vpshufb %ymm5, %ymm1, %ymm1
vpshufb %ymm5, %ymm2, %ymm2
vpshufb %ymm5, %ymm3, %ymm3
vpsrlvd %ymm6, %ymm0, %ymm0
vpsrlvd %ymm6, %ymm1, %ymm1
vpsrlvd %ymm6, %ymm2, %ymm2
vpsrlvd %ymm6, %ymm3, %ymm3
vpsrlvq %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm1, %ymm1
vpsrlvq %ymm7, %ymm2, %ymm2
vpsrlvq %ymm7, %ymm3, %ymm3
vpmullw %ymm8, %ymm0, %ymm0
vpmullw %ymm8, %ymm1, %ymm1
vpmullw %ymm8, %ymm2, %ymm2
vpmullw %ymm8, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpmulhrsw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm4, %ymm2, %ymm2
vpmulhrsw %ymm4, %ymm3, %ymm3
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm1, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vpermq $0x94, 264(%rsi), %ymm0
vpermq $0x94, 286(%rsi), %ymm1
vpermq $0x94, 308(%rsi), %ymm2
vpermq $0x94, 330(%rsi), %ymm3
vpshufb %ymm5, %ymm0, %ymm0
vpshufb %ymm5, %ymm1, %ymm1
vpshufb %ymm5, %ymm2, %ymm2
vpshufb %ymm5, %ymm3, %ymm3
vpsrlvd %ymm6, %ymm0, %ymm0
vpsrlvd %ymm6, %ymm1, %ymm1
vpsrlvd %ymm6, %ymm2, %ymm2
vpsrlvd %ymm6, %ymm3, %ymm3
vpsrlvq %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm1, %ymm1
vpsrlvq %ymm7, %ymm2, %ymm2
vpsrlvq %ymm7, %ymm3, %ymm3
vpmullw %ymm8, %ymm0, %ymm0
vpmullw %ymm8, %ymm1, %ymm1
vpmullw %ymm8, %ymm2, %ymm2
vpmullw %ymm8, %ymm3, %ymm3
vpsrlw $0x01, %ymm0, %ymm0
vpsrlw $0x01, %ymm1, %ymm1
vpsrlw $0x01, %ymm2, %ymm2
vpsrlw $0x01, %ymm3, %ymm3
vpand %ymm9, %ymm0, %ymm0
vpand %ymm9, %ymm1, %ymm1
vpand %ymm9, %ymm2, %ymm2
vpand %ymm9, %ymm3, %ymm3
vpmulhrsw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm4, %ymm1, %ymm1
vpmulhrsw %ymm4, %ymm2, %ymm2
vpmulhrsw %ymm4, %ymm3, %ymm3
vmovdqu %ymm0, 384(%rdi)
vmovdqu %ymm1, 416(%rdi)
vmovdqu %ymm2, 448(%rdi)
vmovdqu %ymm3, 480(%rdi)
addq $0x160, %rsi
addq $0x200, %rdi
subl $0x01, %edx
jg L_kyber_decompress_11_avx2_start
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_decompress_11_avx2,.-kyber_decompress_11_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_4_avx2_mask:
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
.value 0xf,0xf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_4_avx2_shift:
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
.value 0x200,0x200
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_compress_4_avx2_perm:
.long 0x0,0x4,0x1,0x5
.long 0x2,0x6,0x3,0x7
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_4_avx2_v:
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_4_avx2_shift12:
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
.value 0x1001,0x1001
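# kyber_compress_4_avx2 -- Kyber Compress_d with d = 4: compresses a single
# 256-coefficient polynomial at %rsi into 128 bytes at %rdi, two 4-bit
# values per byte (vpmaddubsw with the 0x1001 byte pair packs low nibble +
# 16 * high nibble).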
#ifndef __APPLE__
.text
.globl kyber_compress_4_avx2
.type kyber_compress_4_avx2,@function
.align 16
kyber_compress_4_avx2:
#else
.section __TEXT,__text
.globl _kyber_compress_4_avx2
.p2align 4
_kyber_compress_4_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_compress_4_avx2_mask(%rip), %ymm8
vmovdqu L_kyber_compress_4_avx2_shift(%rip), %ymm9
vmovdqu L_kyber_compress_4_avx2_perm(%rip), %ymm10
vmovdqu L_kyber_compress_4_avx2_v(%rip), %ymm11
vmovdqu L_kyber_compress_4_avx2_shift12(%rip), %ymm12
vpmulhw (%rsi), %ymm11, %ymm0
vpmulhw 32(%rsi), %ymm11, %ymm1
vpmulhw 64(%rsi), %ymm11, %ymm2
vpmulhw 96(%rsi), %ymm11, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm1, %ymm1
vpmulhrsw %ymm9, %ymm2, %ymm2
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpackuswb %ymm1, %ymm0, %ymm0
vpackuswb %ymm3, %ymm2, %ymm2
vpmaddubsw %ymm12, %ymm0, %ymm0
vpmaddubsw %ymm12, %ymm2, %ymm2
vpackuswb %ymm2, %ymm0, %ymm0
vpmulhw 128(%rsi), %ymm11, %ymm4
vpmulhw 160(%rsi), %ymm11, %ymm5
vpmulhw 192(%rsi), %ymm11, %ymm6
vpmulhw 224(%rsi), %ymm11, %ymm7
vpmulhrsw %ymm9, %ymm4, %ymm4
vpmulhrsw %ymm9, %ymm5, %ymm5
vpmulhrsw %ymm9, %ymm6, %ymm6
vpmulhrsw %ymm9, %ymm7, %ymm7
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpand %ymm8, %ymm6, %ymm6
vpand %ymm8, %ymm7, %ymm7
vpackuswb %ymm5, %ymm4, %ymm4
vpackuswb %ymm7, %ymm6, %ymm6
vpmaddubsw %ymm12, %ymm4, %ymm4
vpmaddubsw %ymm12, %ymm6, %ymm6
vpackuswb %ymm6, %ymm4, %ymm4
vpermd %ymm0, %ymm10, %ymm0
vpermd %ymm4, %ymm10, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm4, 32(%rdi)
vpmulhw 256(%rsi), %ymm11, %ymm0
vpmulhw 288(%rsi), %ymm11, %ymm1
vpmulhw 320(%rsi), %ymm11, %ymm2
vpmulhw 352(%rsi), %ymm11, %ymm3
vpmulhrsw %ymm9, %ymm0, %ymm0
vpmulhrsw %ymm9, %ymm1, %ymm1
vpmulhrsw %ymm9, %ymm2, %ymm2
vpmulhrsw %ymm9, %ymm3, %ymm3
vpand %ymm8, %ymm0, %ymm0
vpand %ymm8, %ymm1, %ymm1
vpand %ymm8, %ymm2, %ymm2
vpand %ymm8, %ymm3, %ymm3
vpackuswb %ymm1, %ymm0, %ymm0
vpackuswb %ymm3, %ymm2, %ymm2
vpmaddubsw %ymm12, %ymm0, %ymm0
vpmaddubsw %ymm12, %ymm2, %ymm2
vpackuswb %ymm2, %ymm0, %ymm0
vpmulhw 384(%rsi), %ymm11, %ymm4
vpmulhw 416(%rsi), %ymm11, %ymm5
vpmulhw 448(%rsi), %ymm11, %ymm6
vpmulhw 480(%rsi), %ymm11, %ymm7
vpmulhrsw %ymm9, %ymm4, %ymm4
vpmulhrsw %ymm9, %ymm5, %ymm5
vpmulhrsw %ymm9, %ymm6, %ymm6
vpmulhrsw %ymm9, %ymm7, %ymm7
vpand %ymm8, %ymm4, %ymm4
vpand %ymm8, %ymm5, %ymm5
vpand %ymm8, %ymm6, %ymm6
vpand %ymm8, %ymm7, %ymm7
vpackuswb %ymm5, %ymm4, %ymm4
vpackuswb %ymm7, %ymm6, %ymm6
vpmaddubsw %ymm12, %ymm4, %ymm4
vpmaddubsw %ymm12, %ymm6, %ymm6
vpackuswb %ymm6, %ymm4, %ymm4
vpermd %ymm0, %ymm10, %ymm0
vpermd %ymm4, %ymm10, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm4, 96(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_compress_4_avx2,.-kyber_compress_4_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_decompress_4_avx2_mask:
.long 0xf0000f,0xf0000f,0xf0000f,0xf0000f
.long 0xf0000f,0xf0000f,0xf0000f,0xf0000f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_decompress_4_avx2_shift:
.long 0x800800,0x800800,0x800800,0x800800
.long 0x800800,0x800800,0x800800,0x800800
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_4_avx2_q:
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_4_avx2_shuf:
.value 0x0,0x0
.value 0x101,0x101
.value 0x202,0x202
.value 0x303,0x303
.value 0x404,0x404
.value 0x505,0x505
.value 0x606,0x606
.value 0x707,0x707
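# kyber_decompress_4_avx2 -- Kyber Decompress_d with d = 4: expands 128
# packed bytes at %rsi into one 256-coefficient polynomial at %rdi,
# c -> round(q * c / 16) with q = 3329.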
#ifndef __APPLE__
.text
.globl kyber_decompress_4_avx2
.type kyber_decompress_4_avx2,@function
.align 16
kyber_decompress_4_avx2:
#else
.section __TEXT,__text
.globl _kyber_decompress_4_avx2
.p2align 4
_kyber_decompress_4_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_decompress_4_avx2_mask(%rip), %ymm4
vmovdqu L_kyber_decompress_4_avx2_shift(%rip), %ymm5
vmovdqu L_kyber_decompress_4_avx2_shuf(%rip), %ymm6
vmovdqu L_kyber_decompress_4_avx2_q(%rip), %ymm7
vpbroadcastq (%rsi), %ymm0
vpbroadcastq 8(%rsi), %ymm1
vpbroadcastq 16(%rsi), %ymm2
vpbroadcastq 24(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmullw %ymm5, %ymm0, %ymm0
vpmullw %ymm5, %ymm1, %ymm1
vpmullw %ymm5, %ymm2, %ymm2
vpmullw %ymm5, %ymm3, %ymm3
vpmulhrsw %ymm7, %ymm0, %ymm0
vpmulhrsw %ymm7, %ymm1, %ymm1
vpmulhrsw %ymm7, %ymm2, %ymm2
vpmulhrsw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vpbroadcastq 32(%rsi), %ymm0
vpbroadcastq 40(%rsi), %ymm1
vpbroadcastq 48(%rsi), %ymm2
vpbroadcastq 56(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmullw %ymm5, %ymm0, %ymm0
vpmullw %ymm5, %ymm1, %ymm1
vpmullw %ymm5, %ymm2, %ymm2
vpmullw %ymm5, %ymm3, %ymm3
vpmulhrsw %ymm7, %ymm0, %ymm0
vpmulhrsw %ymm7, %ymm1, %ymm1
vpmulhrsw %ymm7, %ymm2, %ymm2
vpmulhrsw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 160(%rdi)
vmovdqu %ymm2, 192(%rdi)
vmovdqu %ymm3, 224(%rdi)
vpbroadcastq 64(%rsi), %ymm0
vpbroadcastq 72(%rsi), %ymm1
vpbroadcastq 80(%rsi), %ymm2
vpbroadcastq 88(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmullw %ymm5, %ymm0, %ymm0
vpmullw %ymm5, %ymm1, %ymm1
vpmullw %ymm5, %ymm2, %ymm2
vpmullw %ymm5, %ymm3, %ymm3
vpmulhrsw %ymm7, %ymm0, %ymm0
vpmulhrsw %ymm7, %ymm1, %ymm1
vpmulhrsw %ymm7, %ymm2, %ymm2
vpmulhrsw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm1, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vpbroadcastq 96(%rsi), %ymm0
vpbroadcastq 104(%rsi), %ymm1
vpbroadcastq 112(%rsi), %ymm2
vpbroadcastq 120(%rsi), %ymm3
vpshufb %ymm6, %ymm0, %ymm0
vpshufb %ymm6, %ymm1, %ymm1
vpshufb %ymm6, %ymm2, %ymm2
vpshufb %ymm6, %ymm3, %ymm3
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpand %ymm4, %ymm2, %ymm2
vpand %ymm4, %ymm3, %ymm3
vpmullw %ymm5, %ymm0, %ymm0
vpmullw %ymm5, %ymm1, %ymm1
vpmullw %ymm5, %ymm2, %ymm2
vpmullw %ymm5, %ymm3, %ymm3
vpmulhrsw %ymm7, %ymm0, %ymm0
vpmulhrsw %ymm7, %ymm1, %ymm1
vpmulhrsw %ymm7, %ymm2, %ymm2
vpmulhrsw %ymm7, %ymm3, %ymm3
vmovdqu %ymm0, 384(%rdi)
vmovdqu %ymm1, 416(%rdi)
vmovdqu %ymm2, 448(%rdi)
vmovdqu %ymm3, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_decompress_4_avx2,.-kyber_decompress_4_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_v:
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
.value 0x4ebf,0x4ebf
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_shift:
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
.value 0x400,0x400
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_mask:
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
.value 0x1f,0x1f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_shift1:
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
.value 0x2001,0x2001
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_shift2:
.long 0x4000001,0x4000001,0x4000001,0x4000001
.long 0x4000001,0x4000001,0x4000001,0x4000001
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_shlv:
.quad 0xc, 0xc
.quad 0xc, 0xc
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_compress_5_avx2_shuffle:
.value 0x100,0x302
.value 0xff04,0xffff
.value 0xffff,0x908
.value 0xb0a,0xff0c
.value 0xa09,0xc0b
.value 0xff,0x201
.value 0x403,0xffff
.value 0xffff,0x8ff
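# kyber_compress_5_avx2 -- Kyber Compress_d with d = 5: compresses a single
# 256-coefficient polynomial at %rsi into 160 bytes at %rdi (five bits per
# coefficient), 20 bytes per block of 32 coefficients.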
#ifndef __APPLE__
.text
.globl kyber_compress_5_avx2
.type kyber_compress_5_avx2,@function
.align 16
kyber_compress_5_avx2:
#else
.section __TEXT,__text
.globl _kyber_compress_5_avx2
.p2align 4
_kyber_compress_5_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_compress_5_avx2_v(%rip), %ymm2
vmovdqu L_kyber_compress_5_avx2_shift(%rip), %ymm3
vmovdqu L_kyber_compress_5_avx2_mask(%rip), %ymm4
vmovdqu L_kyber_compress_5_avx2_shift1(%rip), %ymm5
vmovdqu L_kyber_compress_5_avx2_shift2(%rip), %ymm6
vmovdqu L_kyber_compress_5_avx2_shlv(%rip), %ymm7
vmovdqu L_kyber_compress_5_avx2_shuffle(%rip), %ymm8
vpmulhw (%rsi), %ymm2, %ymm0
vpmulhw 32(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%rdi)
vmovss %xmm1, 16(%rdi)
vpmulhw 64(%rsi), %ymm2, %ymm0
vpmulhw 96(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 20(%rdi)
vmovss %xmm1, 36(%rdi)
vpmulhw 128(%rsi), %ymm2, %ymm0
vpmulhw 160(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 40(%rdi)
vmovss %xmm1, 56(%rdi)
vpmulhw 192(%rsi), %ymm2, %ymm0
vpmulhw 224(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 60(%rdi)
vmovss %xmm1, 76(%rdi)
vpmulhw 256(%rsi), %ymm2, %ymm0
vpmulhw 288(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 80(%rdi)
vmovss %xmm1, 96(%rdi)
vpmulhw 320(%rsi), %ymm2, %ymm0
vpmulhw 352(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 100(%rdi)
vmovss %xmm1, 116(%rdi)
vpmulhw 384(%rsi), %ymm2, %ymm0
vpmulhw 416(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 120(%rdi)
vmovss %xmm1, 136(%rdi)
vpmulhw 448(%rsi), %ymm2, %ymm0
vpmulhw 480(%rsi), %ymm2, %ymm1
vpmulhrsw %ymm3, %ymm0, %ymm0
vpmulhrsw %ymm3, %ymm1, %ymm1
vpand %ymm4, %ymm0, %ymm0
vpand %ymm4, %ymm1, %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vpmaddubsw %ymm5, %ymm0, %ymm0
vpmaddwd %ymm6, %ymm0, %ymm0
vpsllvd %ymm7, %ymm0, %ymm0
vpsrlvq %ymm7, %ymm0, %ymm0
vpshufb %ymm8, %ymm0, %ymm0
vextracti128 $0x01, %ymm0, %xmm1
vpblendvb %xmm8, %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, 140(%rdi)
vmovss %xmm1, 156(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_compress_5_avx2,.-kyber_compress_5_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_5_avx2_q:
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
.value 0xd01,0xd01
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_5_avx2_shuf:
.value 0x0,0x100
.value 0x101,0x201
.value 0x302,0x303
.value 0x403,0x404
.value 0x505,0x605
.value 0x606,0x706
.value 0x807,0x808
.value 0x908,0x909
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_5_avx2_mask:
.value 0x1f,0x3e0
.value 0x7c,0xf80
.value 0x1f0,0x3e
.value 0x7c0,0xf8
.value 0x1f,0x3e0
.value 0x7c,0xf80
.value 0x1f0,0x3e
.value 0x7c0,0xf8
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_decompress_5_avx2_shift:
.value 0x400,0x20
.value 0x100,0x8
.value 0x40,0x200
.value 0x10,0x80
.value 0x400,0x20
.value 0x100,0x8
.value 0x40,0x200
.value 0x10,0x80
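# kyber_decompress_5_avx2 -- Kyber Decompress_d with d = 5: expands 160
# packed bytes at %rsi into one polynomial at %rdi, c -> round(q * c / 32),
# q = 3329. Each vbroadcasti128 pulls 16 bytes but only 10 are consumed per
# block, so the final load reads a few bytes past the 160-byte input.
# The mask lane for the last coefficient of each 10-byte group must be 0xf8
# (bits 3..7 of the final byte) to pair with its 0x80 multiplier.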
#ifndef __APPLE__
.text
.globl kyber_decompress_5_avx2
.type kyber_decompress_5_avx2,@function
.align 16
kyber_decompress_5_avx2:
#else
.section __TEXT,__text
.globl _kyber_decompress_5_avx2
.p2align 4
_kyber_decompress_5_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_decompress_5_avx2_q(%rip), %ymm1
vmovdqu L_kyber_decompress_5_avx2_shuf(%rip), %ymm2
vmovdqu L_kyber_decompress_5_avx2_mask(%rip), %ymm3
vmovdqu L_kyber_decompress_5_avx2_shift(%rip), %ymm4
vbroadcasti128 (%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, (%rdi)
vbroadcasti128 10(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 32(%rdi)
vbroadcasti128 20(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 64(%rdi)
vbroadcasti128 30(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 96(%rdi)
vbroadcasti128 40(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 128(%rdi)
vbroadcasti128 50(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 160(%rdi)
vbroadcasti128 60(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 192(%rdi)
vbroadcasti128 70(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 224(%rdi)
vbroadcasti128 80(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 256(%rdi)
vbroadcasti128 90(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 288(%rdi)
vbroadcasti128 100(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 320(%rdi)
vbroadcasti128 110(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 352(%rdi)
vbroadcasti128 120(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 384(%rdi)
vbroadcasti128 130(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 416(%rdi)
vbroadcasti128 140(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 448(%rdi)
vbroadcasti128 150(%rsi), %ymm0
vpshufb %ymm2, %ymm0, %ymm0
vpand %ymm3, %ymm0, %ymm0
vpmullw %ymm4, %ymm0, %ymm0
vpmulhrsw %ymm1, %ymm0, %ymm0
vmovdqu %ymm0, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_decompress_5_avx2,.-kyber_decompress_5_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
L_kyber_from_msg_avx2_shift:
.long 0x3,0x2,0x1,0x0
.long 0x3,0x2,0x1,0x0
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_from_msg_avx2_shuf:
.value 0x100,0x504
.value 0x908,0xd0c
.value 0x302,0x706
.value 0xb0a,0xf0e
.value 0x100,0x504
.value 0x908,0xd0c
.value 0x302,0x706
.value 0xb0a,0xf0e
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
L_kyber_from_msg_avx2_hqs:
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
.value 0x681,0x681
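# kyber_from_msg_avx2 -- expands the 32-byte message at %rsi into a
# polynomial at %rdi, mapping each message bit to 0 or (q + 1) / 2 = 1665
# (0x681): bits are spread with vpsllvd/vpshufb, turned into all-ones or
# all-zeros word masks by vpsraw $15, and ANDed with 0x681.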
#ifndef __APPLE__
.text
.globl kyber_from_msg_avx2
.type kyber_from_msg_avx2,@function
.align 16
kyber_from_msg_avx2:
#else
.section __TEXT,__text
.globl _kyber_from_msg_avx2
.p2align 4
_kyber_from_msg_avx2:
#endif /* __APPLE__ */
vmovdqu (%rsi), %ymm0
vmovdqu L_kyber_from_msg_avx2_shift(%rip), %ymm9
vmovdqu L_kyber_from_msg_avx2_shuf(%rip), %ymm10
vmovdqu L_kyber_from_msg_avx2_hqs(%rip), %ymm11
vpshufd $0x00, %ymm0, %ymm4
vpsllvd %ymm9, %ymm4, %ymm4
vpshufb %ymm10, %ymm4, %ymm4
vpsllw $12, %ymm4, %ymm1
vpsllw $8, %ymm4, %ymm2
vpsllw $4, %ymm4, %ymm3
vpsraw $15, %ymm1, %ymm1
vpsraw $15, %ymm2, %ymm2
vpsraw $15, %ymm3, %ymm3
vpsraw $15, %ymm4, %ymm4
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm11, %ymm4, %ymm4
vpunpcklqdq %ymm2, %ymm1, %ymm5
vpunpckhqdq %ymm2, %ymm1, %ymm7
vpunpcklqdq %ymm4, %ymm3, %ymm6
vpunpckhqdq %ymm4, %ymm3, %ymm8
vperm2i128 $32, %ymm6, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm5, %ymm3
vperm2i128 $32, %ymm8, %ymm7, %ymm2
vperm2i128 $49, %ymm8, %ymm7, %ymm4
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 256(%rdi)
vmovdqu %ymm4, 288(%rdi)
vpshufd $0x55, %ymm0, %ymm4
vpsllvd %ymm9, %ymm4, %ymm4
vpshufb %ymm10, %ymm4, %ymm4
vpsllw $12, %ymm4, %ymm1
vpsllw $8, %ymm4, %ymm2
vpsllw $4, %ymm4, %ymm3
vpsraw $15, %ymm1, %ymm1
vpsraw $15, %ymm2, %ymm2
vpsraw $15, %ymm3, %ymm3
vpsraw $15, %ymm4, %ymm4
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm11, %ymm4, %ymm4
vpunpcklqdq %ymm2, %ymm1, %ymm5
vpunpckhqdq %ymm2, %ymm1, %ymm7
vpunpcklqdq %ymm4, %ymm3, %ymm6
vpunpckhqdq %ymm4, %ymm3, %ymm8
vperm2i128 $32, %ymm6, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm5, %ymm3
vperm2i128 $32, %ymm8, %ymm7, %ymm2
vperm2i128 $49, %ymm8, %ymm7, %ymm4
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 320(%rdi)
vmovdqu %ymm4, 352(%rdi)
vpshufd $0xaa, %ymm0, %ymm4
vpsllvd %ymm9, %ymm4, %ymm4
vpshufb %ymm10, %ymm4, %ymm4
vpsllw $12, %ymm4, %ymm1
vpsllw $8, %ymm4, %ymm2
vpsllw $4, %ymm4, %ymm3
vpsraw $15, %ymm1, %ymm1
vpsraw $15, %ymm2, %ymm2
vpsraw $15, %ymm3, %ymm3
vpsraw $15, %ymm4, %ymm4
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm11, %ymm4, %ymm4
vpunpcklqdq %ymm2, %ymm1, %ymm5
vpunpckhqdq %ymm2, %ymm1, %ymm7
vpunpcklqdq %ymm4, %ymm3, %ymm6
vpunpckhqdq %ymm4, %ymm3, %ymm8
vperm2i128 $32, %ymm6, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm5, %ymm3
vperm2i128 $32, %ymm8, %ymm7, %ymm2
vperm2i128 $49, %ymm8, %ymm7, %ymm4
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 160(%rdi)
vmovdqu %ymm3, 384(%rdi)
vmovdqu %ymm4, 416(%rdi)
vpshufd $0xff, %ymm0, %ymm4
vpsllvd %ymm9, %ymm4, %ymm4
vpshufb %ymm10, %ymm4, %ymm4
vpsllw $12, %ymm4, %ymm1
vpsllw $8, %ymm4, %ymm2
vpsllw $4, %ymm4, %ymm3
vpsraw $15, %ymm1, %ymm1
vpsraw $15, %ymm2, %ymm2
vpsraw $15, %ymm3, %ymm3
vpsraw $15, %ymm4, %ymm4
vpand %ymm11, %ymm1, %ymm1
vpand %ymm11, %ymm2, %ymm2
vpand %ymm11, %ymm3, %ymm3
vpand %ymm11, %ymm4, %ymm4
vpunpcklqdq %ymm2, %ymm1, %ymm5
vpunpckhqdq %ymm2, %ymm1, %ymm7
vpunpcklqdq %ymm4, %ymm3, %ymm6
vpunpckhqdq %ymm4, %ymm3, %ymm8
vperm2i128 $32, %ymm6, %ymm5, %ymm1
vperm2i128 $49, %ymm6, %ymm5, %ymm3
vperm2i128 $32, %ymm8, %ymm7, %ymm2
vperm2i128 $49, %ymm8, %ymm7, %ymm4
vmovdqu %ymm1, 192(%rdi)
vmovdqu %ymm2, 224(%rdi)
vmovdqu %ymm3, 448(%rdi)
vmovdqu %ymm4, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_from_msg_avx2,.-kyber_from_msg_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
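# q/2 = 1664 (0x680) in every 16-bit lane; used by kyber_to_msg_avx2 below.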
L_kyber_to_msg_avx2_hqs:
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
.value 0x680,0x680
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
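# -831 (0xfcc1) as a signed 16-bit value in every lane. Adding it to the
# approximate |q/2 - c| computed below leaves the sign bit set exactly
# when coefficient c is closer to q/2 than to 0 or q, i.e. when the
# decoded message bit is 1 (an inference from the code below).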
L_kyber_to_msg_avx2_hhqs:
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
.value 0xfcc1,0xfcc1
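# kyber_to_msg_avx2: rdi = 32-byte message out, rsi = input polynomial.
# Compresses 256 coefficients to one bit each. For every block of 64
# coefficients: t = q/2 - c, fold toward |t| with the sign smear
# (vpsraw $15 + vpxor), bias by -831 so the sign bit becomes the message
# bit, pack words to bytes (vpacksswb, with vpermq $0xd8 undoing the AVX2
# lane interleave), and harvest 32 sign bits per dword with vpmovmskb.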
#ifndef __APPLE__
.text
.globl kyber_to_msg_avx2
.type kyber_to_msg_avx2,@function
.align 16
kyber_to_msg_avx2:
#else
.section __TEXT,__text
.globl _kyber_to_msg_avx2
.p2align 4
_kyber_to_msg_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_to_msg_avx2_hqs(%rip), %ymm8
vmovdqu L_kyber_to_msg_avx2_hhqs(%rip), %ymm9
vpsubw (%rsi), %ymm8, %ymm0
vpsubw 32(%rsi), %ymm8, %ymm1
vpsubw 64(%rsi), %ymm8, %ymm2
vpsubw 96(%rsi), %ymm8, %ymm3
vpsraw $15, %ymm0, %ymm4
vpsraw $15, %ymm1, %ymm5
vpsraw $15, %ymm2, %ymm6
vpsraw $15, %ymm3, %ymm7
vpxor %ymm4, %ymm0, %ymm0
vpxor %ymm5, %ymm1, %ymm1
vpxor %ymm6, %ymm2, %ymm2
vpxor %ymm7, %ymm3, %ymm3
vpaddw %ymm9, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm9, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vpacksswb %ymm1, %ymm0, %ymm0
vpacksswb %ymm3, %ymm2, %ymm2
vpermq $0xd8, %ymm0, %ymm0
vpermq $0xd8, %ymm2, %ymm2
vpmovmskb %ymm0, %edx
vpmovmskb %ymm2, %eax
movl %edx, (%rdi)
movl %eax, 4(%rdi)
vpsubw 128(%rsi), %ymm8, %ymm0
vpsubw 160(%rsi), %ymm8, %ymm1
vpsubw 192(%rsi), %ymm8, %ymm2
vpsubw 224(%rsi), %ymm8, %ymm3
vpsraw $15, %ymm0, %ymm4
vpsraw $15, %ymm1, %ymm5
vpsraw $15, %ymm2, %ymm6
vpsraw $15, %ymm3, %ymm7
vpxor %ymm4, %ymm0, %ymm0
vpxor %ymm5, %ymm1, %ymm1
vpxor %ymm6, %ymm2, %ymm2
vpxor %ymm7, %ymm3, %ymm3
vpaddw %ymm9, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm9, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vpacksswb %ymm1, %ymm0, %ymm0
vpacksswb %ymm3, %ymm2, %ymm2
vpermq $0xd8, %ymm0, %ymm0
vpermq $0xd8, %ymm2, %ymm2
vpmovmskb %ymm0, %edx
vpmovmskb %ymm2, %eax
movl %edx, 8(%rdi)
movl %eax, 12(%rdi)
vpsubw 256(%rsi), %ymm8, %ymm0
vpsubw 288(%rsi), %ymm8, %ymm1
vpsubw 320(%rsi), %ymm8, %ymm2
vpsubw 352(%rsi), %ymm8, %ymm3
vpsraw $15, %ymm0, %ymm4
vpsraw $15, %ymm1, %ymm5
vpsraw $15, %ymm2, %ymm6
vpsraw $15, %ymm3, %ymm7
vpxor %ymm4, %ymm0, %ymm0
vpxor %ymm5, %ymm1, %ymm1
vpxor %ymm6, %ymm2, %ymm2
vpxor %ymm7, %ymm3, %ymm3
vpaddw %ymm9, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm9, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vpacksswb %ymm1, %ymm0, %ymm0
vpacksswb %ymm3, %ymm2, %ymm2
vpermq $0xd8, %ymm0, %ymm0
vpermq $0xd8, %ymm2, %ymm2
vpmovmskb %ymm0, %edx
vpmovmskb %ymm2, %eax
movl %edx, 16(%rdi)
movl %eax, 20(%rdi)
vpsubw 384(%rsi), %ymm8, %ymm0
vpsubw 416(%rsi), %ymm8, %ymm1
vpsubw 448(%rsi), %ymm8, %ymm2
vpsubw 480(%rsi), %ymm8, %ymm3
vpsraw $15, %ymm0, %ymm4
vpsraw $15, %ymm1, %ymm5
vpsraw $15, %ymm2, %ymm6
vpsraw $15, %ymm3, %ymm7
vpxor %ymm4, %ymm0, %ymm0
vpxor %ymm5, %ymm1, %ymm1
vpxor %ymm6, %ymm2, %ymm2
vpxor %ymm7, %ymm3, %ymm3
vpaddw %ymm9, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm9, %ymm2, %ymm2
vpaddw %ymm9, %ymm3, %ymm3
vpacksswb %ymm1, %ymm0, %ymm0
vpacksswb %ymm3, %ymm2, %ymm2
vpermq $0xd8, %ymm0, %ymm0
vpermq $0xd8, %ymm2, %ymm2
vpmovmskb %ymm0, %edx
vpmovmskb %ymm2, %eax
movl %edx, 24(%rdi)
movl %eax, 28(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_to_msg_avx2,.-kyber_to_msg_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
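# Shuffle for kyber_from_bytes_avx2 (below): gathers each 3-byte group of
# the 12-bit packed encoding into one 32-bit lane; indices with the top
# bit set (the 0xff bytes) make vpshufb zero that lane's spare byte.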
L_kyber_from_bytes_avx2_shuf:
.value 0x100,0xff02
.value 0x403,0xff05
.value 0x706,0xff08
.value 0xa09,0xff0b
.value 0x504,0xff06
.value 0x807,0xff09
.value 0xb0a,0xff0c
.value 0xe0d,0xff0f
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
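# 0xfff in every 32-bit lane: isolates a 12-bit coefficient.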
L_kyber_from_bytes_avx2_mask:
.long 0xfff,0xfff,0xfff,0xfff
.long 0xfff,0xfff,0xfff,0xfff
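# kyber_from_bytes_avx2: rdi = output polynomial, rsi = 384 packed bytes.
# Unpacks 256 x 12-bit coefficients into 16-bit words, 128 per pass.
# vpermq/vpblendd realign the odd-sized byte groups across 128-bit lanes,
# vpshufb (table above) isolates each 3-byte group, and the
# vpandn/vpand/vpslld/vpor sequence splits each group's two 12-bit values
# into the two 16-bit halves of its dword.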
#ifndef __APPLE__
.text
.globl kyber_from_bytes_avx2
.type kyber_from_bytes_avx2,@function
.align 16
kyber_from_bytes_avx2:
#else
.section __TEXT,__text
.globl _kyber_from_bytes_avx2
.p2align 4
_kyber_from_bytes_avx2:
#endif /* __APPLE__ */
vmovdqu L_kyber_from_bytes_avx2_shuf(%rip), %ymm12
vmovdqu L_kyber_from_bytes_avx2_mask(%rip), %ymm13
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 128(%rsi), %ymm4
vmovdqu 160(%rsi), %ymm5
vpermq $0xe9, %ymm5, %ymm7
vpermq $0x00, %ymm5, %ymm8
vpermq $0x3e, %ymm4, %ymm6
vpermq $0x40, %ymm4, %ymm9
vpermq $0x03, %ymm3, %ymm5
vpermq $0x94, %ymm3, %ymm4
vpermq $0xe9, %ymm2, %ymm3
vpermq $0x00, %ymm2, %ymm10
vpermq $0x3e, %ymm1, %ymm2
vpermq $0x40, %ymm1, %ymm11
vpermq $0x03, %ymm0, %ymm1
vpermq $0x94, %ymm0, %ymm0
vpblendd $0xc0, %ymm8, %ymm6, %ymm6
vpblendd $0xfc, %ymm9, %ymm5, %ymm5
vpblendd $0xc0, %ymm10, %ymm2, %ymm2
vpblendd $0xfc, %ymm11, %ymm1, %ymm1
vpshufb %ymm12, %ymm0, %ymm0
vpshufb %ymm12, %ymm1, %ymm1
vpshufb %ymm12, %ymm2, %ymm2
vpshufb %ymm12, %ymm3, %ymm3
vpshufb %ymm12, %ymm4, %ymm4
vpshufb %ymm12, %ymm5, %ymm5
vpshufb %ymm12, %ymm6, %ymm6
vpshufb %ymm12, %ymm7, %ymm7
vpandn %ymm0, %ymm13, %ymm8
vpandn %ymm1, %ymm13, %ymm9
vpandn %ymm2, %ymm13, %ymm10
vpandn %ymm3, %ymm13, %ymm11
vpand %ymm0, %ymm13, %ymm0
vpand %ymm1, %ymm13, %ymm1
vpand %ymm2, %ymm13, %ymm2
vpand %ymm3, %ymm13, %ymm3
vpslld $4, %ymm8, %ymm8
vpslld $4, %ymm9, %ymm9
vpslld $4, %ymm10, %ymm10
vpslld $4, %ymm11, %ymm11
vpor %ymm8, %ymm0, %ymm0
vpor %ymm9, %ymm1, %ymm1
vpor %ymm10, %ymm2, %ymm2
vpor %ymm11, %ymm3, %ymm3
vpandn %ymm4, %ymm13, %ymm8
vpandn %ymm5, %ymm13, %ymm9
vpandn %ymm6, %ymm13, %ymm10
vpandn %ymm7, %ymm13, %ymm11
vpand %ymm4, %ymm13, %ymm4
vpand %ymm5, %ymm13, %ymm5
vpand %ymm6, %ymm13, %ymm6
vpand %ymm7, %ymm13, %ymm7
vpslld $4, %ymm8, %ymm8
vpslld $4, %ymm9, %ymm9
vpslld $4, %ymm10, %ymm10
vpslld $4, %ymm11, %ymm11
vpor %ymm8, %ymm4, %ymm4
vpor %ymm9, %ymm5, %ymm5
vpor %ymm10, %ymm6, %ymm6
vpor %ymm11, %ymm7, %ymm7
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 128(%rdi)
vmovdqu %ymm5, 160(%rdi)
vmovdqu %ymm6, 192(%rdi)
vmovdqu %ymm7, 224(%rdi)
vmovdqu 192(%rsi), %ymm0
vmovdqu 224(%rsi), %ymm1
vmovdqu 256(%rsi), %ymm2
vmovdqu 288(%rsi), %ymm3
vmovdqu 320(%rsi), %ymm4
vmovdqu 352(%rsi), %ymm5
vpermq $0xe9, %ymm5, %ymm7
vpermq $0x00, %ymm5, %ymm8
vpermq $0x3e, %ymm4, %ymm6
vpermq $0x40, %ymm4, %ymm9
vpermq $0x03, %ymm3, %ymm5
vpermq $0x94, %ymm3, %ymm4
vpermq $0xe9, %ymm2, %ymm3
vpermq $0x00, %ymm2, %ymm10
vpermq $0x3e, %ymm1, %ymm2
vpermq $0x40, %ymm1, %ymm11
vpermq $0x03, %ymm0, %ymm1
vpermq $0x94, %ymm0, %ymm0
vpblendd $0xc0, %ymm8, %ymm6, %ymm6
vpblendd $0xfc, %ymm9, %ymm5, %ymm5
vpblendd $0xc0, %ymm10, %ymm2, %ymm2
vpblendd $0xfc, %ymm11, %ymm1, %ymm1
vpshufb %ymm12, %ymm0, %ymm0
vpshufb %ymm12, %ymm1, %ymm1
vpshufb %ymm12, %ymm2, %ymm2
vpshufb %ymm12, %ymm3, %ymm3
vpshufb %ymm12, %ymm4, %ymm4
vpshufb %ymm12, %ymm5, %ymm5
vpshufb %ymm12, %ymm6, %ymm6
vpshufb %ymm12, %ymm7, %ymm7
vpandn %ymm0, %ymm13, %ymm8
vpandn %ymm1, %ymm13, %ymm9
vpandn %ymm2, %ymm13, %ymm10
vpandn %ymm3, %ymm13, %ymm11
vpand %ymm0, %ymm13, %ymm0
vpand %ymm1, %ymm13, %ymm1
vpand %ymm2, %ymm13, %ymm2
vpand %ymm3, %ymm13, %ymm3
vpslld $4, %ymm8, %ymm8
vpslld $4, %ymm9, %ymm9
vpslld $4, %ymm10, %ymm10
vpslld $4, %ymm11, %ymm11
vpor %ymm8, %ymm0, %ymm0
vpor %ymm9, %ymm1, %ymm1
vpor %ymm10, %ymm2, %ymm2
vpor %ymm11, %ymm3, %ymm3
vpandn %ymm4, %ymm13, %ymm8
vpandn %ymm5, %ymm13, %ymm9
vpandn %ymm6, %ymm13, %ymm10
vpandn %ymm7, %ymm13, %ymm11
vpand %ymm4, %ymm13, %ymm4
vpand %ymm5, %ymm13, %ymm5
vpand %ymm6, %ymm13, %ymm6
vpand %ymm7, %ymm13, %ymm7
vpslld $4, %ymm8, %ymm8
vpslld $4, %ymm9, %ymm9
vpslld $4, %ymm10, %ymm10
vpslld $4, %ymm11, %ymm11
vpor %ymm8, %ymm4, %ymm4
vpor %ymm9, %ymm5, %ymm5
vpor %ymm10, %ymm6, %ymm6
vpor %ymm11, %ymm7, %ymm7
vmovdqu %ymm0, 256(%rdi)
vmovdqu %ymm1, 288(%rdi)
vmovdqu %ymm2, 320(%rdi)
vmovdqu %ymm3, 352(%rdi)
vmovdqu %ymm4, 384(%rdi)
vmovdqu %ymm5, 416(%rdi)
vmovdqu %ymm6, 448(%rdi)
vmovdqu %ymm7, 480(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_from_bytes_avx2,.-kyber_from_bytes_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
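# 0xfff in every 32-bit lane for kyber_to_bytes_avx2 (below).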
L_kyber_to_bytes_avx2_mask:
.long 0xfff,0xfff,0xfff,0xfff
.long 0xfff,0xfff,0xfff,0xfff
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
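# Shuffle for kyber_to_bytes_avx2: compacts the 3 payload bytes of each
# dword (dropping the spare byte), leaving a 4-byte gap per 128-bit lane
# that the dword permute below closes up.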
L_kyber_to_bytes_avx2_shuf:
.value 0x100,0x402
.value 0x605,0x908
.value 0xc0a,0xe0d
.value 0xffff,0xffff
.value 0x605,0x908
.value 0xc0a,0xe0d
.value 0xffff,0xffff
.value 0x100,0x402
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
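# Dword permute that closes the per-lane gaps left by the shuffle above.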
L_kyber_to_bytes_avx2_perm:
.long 0x0,0x1,0x2,0x7
.long 0x4,0x5,0x3,0x6
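# kyber_to_bytes_avx2: rdi = 384-byte output, rsi = input polynomial.
# Packs 256 coefficients into 12-bit values, 128 per pass. The
# subtract/sign/and/add prologue is a conditional subtract of q (csubq):
# it normalizes every coefficient into [0, q) using the external kyber_q
# constant. The mask/shift/or step then merges each dword's coefficient
# pair into 24 bits, and the shuffle/permute/blend tail compacts the
# result into contiguous packed bytes (48 bytes per 32 coefficients).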
#ifndef __APPLE__
.text
.globl kyber_to_bytes_avx2
.type kyber_to_bytes_avx2,@function
.align 16
kyber_to_bytes_avx2:
#else
.section __TEXT,__text
.globl _kyber_to_bytes_avx2
.p2align 4
_kyber_to_bytes_avx2:
#endif /* __APPLE__ */
vmovdqu kyber_q(%rip), %ymm12
vmovdqu L_kyber_to_bytes_avx2_mask(%rip), %ymm13
vmovdqu L_kyber_to_bytes_avx2_shuf(%rip), %ymm14
vmovdqu L_kyber_to_bytes_avx2_perm(%rip), %ymm15
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
vmovdqu 128(%rsi), %ymm4
vmovdqu 160(%rsi), %ymm5
vmovdqu 192(%rsi), %ymm6
vmovdqu 224(%rsi), %ymm7
vpsubw %ymm12, %ymm0, %ymm8
vpsubw %ymm12, %ymm1, %ymm9
vpsubw %ymm12, %ymm2, %ymm10
vpsubw %ymm12, %ymm3, %ymm11
vpsraw $15, %ymm8, %ymm0
vpsraw $15, %ymm9, %ymm1
vpsraw $15, %ymm10, %ymm2
vpsraw $15, %ymm11, %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm10, %ymm2, %ymm2
vpaddw %ymm11, %ymm3, %ymm3
vpsubw %ymm12, %ymm4, %ymm8
vpsubw %ymm12, %ymm5, %ymm9
vpsubw %ymm12, %ymm6, %ymm10
vpsubw %ymm12, %ymm7, %ymm11
vpsraw $15, %ymm8, %ymm4
vpsraw $15, %ymm9, %ymm5
vpsraw $15, %ymm10, %ymm6
vpsraw $15, %ymm11, %ymm7
vpand %ymm12, %ymm4, %ymm4
vpand %ymm12, %ymm5, %ymm5
vpand %ymm12, %ymm6, %ymm6
vpand %ymm12, %ymm7, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
vpaddw %ymm10, %ymm6, %ymm6
vpaddw %ymm11, %ymm7, %ymm7
vpsrld $16, %ymm0, %ymm8
vpsrld $16, %ymm1, %ymm9
vpsrld $16, %ymm2, %ymm10
vpsrld $16, %ymm3, %ymm11
vpand %ymm0, %ymm13, %ymm0
vpand %ymm1, %ymm13, %ymm1
vpand %ymm2, %ymm13, %ymm2
vpand %ymm3, %ymm13, %ymm3
vpslld $12, %ymm8, %ymm8
vpslld $12, %ymm9, %ymm9
vpslld $12, %ymm10, %ymm10
vpslld $12, %ymm11, %ymm11
vpor %ymm8, %ymm0, %ymm0
vpor %ymm9, %ymm1, %ymm1
vpor %ymm10, %ymm2, %ymm2
vpor %ymm11, %ymm3, %ymm3
vpsrld $16, %ymm4, %ymm8
vpsrld $16, %ymm5, %ymm9
vpsrld $16, %ymm6, %ymm10
vpsrld $16, %ymm7, %ymm11
vpand %ymm4, %ymm13, %ymm4
vpand %ymm5, %ymm13, %ymm5
vpand %ymm6, %ymm13, %ymm6
vpand %ymm7, %ymm13, %ymm7
vpslld $12, %ymm8, %ymm8
vpslld $12, %ymm9, %ymm9
vpslld $12, %ymm10, %ymm10
vpslld $12, %ymm11, %ymm11
vpor %ymm8, %ymm4, %ymm4
vpor %ymm9, %ymm5, %ymm5
vpor %ymm10, %ymm6, %ymm6
vpor %ymm11, %ymm7, %ymm7
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm1, %ymm1
vpshufb %ymm14, %ymm2, %ymm2
vpshufb %ymm14, %ymm3, %ymm3
vpshufb %ymm14, %ymm4, %ymm4
vpshufb %ymm14, %ymm5, %ymm5
vpshufb %ymm14, %ymm6, %ymm6
vpshufb %ymm14, %ymm7, %ymm7
vpermd %ymm0, %ymm15, %ymm0
vpermd %ymm1, %ymm15, %ymm1
vpermd %ymm2, %ymm15, %ymm2
vpermd %ymm3, %ymm15, %ymm3
vpermd %ymm4, %ymm15, %ymm4
vpermd %ymm5, %ymm15, %ymm5
vpermd %ymm6, %ymm15, %ymm6
vpermd %ymm7, %ymm15, %ymm7
vpermq $0x02, %ymm6, %ymm8
vpermq $0x90, %ymm7, %ymm7
vpermq $0x09, %ymm5, %ymm9
vpermq $0x40, %ymm6, %ymm6
vpermq $0x00, %ymm5, %ymm5
vpblendd $0x3f, %ymm4, %ymm5, %ymm5
vpermq $0x02, %ymm2, %ymm10
vpermq $0x90, %ymm3, %ymm4
vpermq $0x09, %ymm1, %ymm11
vpermq $0x40, %ymm2, %ymm3
vpermq $0x00, %ymm1, %ymm2
vpblendd $0x3f, %ymm0, %ymm2, %ymm2
vpblendd $0x03, %ymm8, %ymm7, %ymm7
vpblendd $0x0f, %ymm9, %ymm6, %ymm6
vpblendd $0x03, %ymm10, %ymm4, %ymm4
vpblendd $0x0f, %ymm11, %ymm3, %ymm3
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 64(%rdi)
vmovdqu %ymm5, 96(%rdi)
vmovdqu %ymm6, 128(%rdi)
vmovdqu %ymm7, 160(%rdi)
vmovdqu 256(%rsi), %ymm0
vmovdqu 288(%rsi), %ymm1
vmovdqu 320(%rsi), %ymm2
vmovdqu 352(%rsi), %ymm3
vmovdqu 384(%rsi), %ymm4
vmovdqu 416(%rsi), %ymm5
vmovdqu 448(%rsi), %ymm6
vmovdqu 480(%rsi), %ymm7
vpsubw %ymm12, %ymm0, %ymm8
vpsubw %ymm12, %ymm1, %ymm9
vpsubw %ymm12, %ymm2, %ymm10
vpsubw %ymm12, %ymm3, %ymm11
vpsraw $15, %ymm8, %ymm0
vpsraw $15, %ymm9, %ymm1
vpsraw $15, %ymm10, %ymm2
vpsraw $15, %ymm11, %ymm3
vpand %ymm12, %ymm0, %ymm0
vpand %ymm12, %ymm1, %ymm1
vpand %ymm12, %ymm2, %ymm2
vpand %ymm12, %ymm3, %ymm3
vpaddw %ymm8, %ymm0, %ymm0
vpaddw %ymm9, %ymm1, %ymm1
vpaddw %ymm10, %ymm2, %ymm2
vpaddw %ymm11, %ymm3, %ymm3
vpsubw %ymm12, %ymm4, %ymm8
vpsubw %ymm12, %ymm5, %ymm9
vpsubw %ymm12, %ymm6, %ymm10
vpsubw %ymm12, %ymm7, %ymm11
vpsraw $15, %ymm8, %ymm4
vpsraw $15, %ymm9, %ymm5
vpsraw $15, %ymm10, %ymm6
vpsraw $15, %ymm11, %ymm7
vpand %ymm12, %ymm4, %ymm4
vpand %ymm12, %ymm5, %ymm5
vpand %ymm12, %ymm6, %ymm6
vpand %ymm12, %ymm7, %ymm7
vpaddw %ymm8, %ymm4, %ymm4
vpaddw %ymm9, %ymm5, %ymm5
vpaddw %ymm10, %ymm6, %ymm6
vpaddw %ymm11, %ymm7, %ymm7
vpsrld $16, %ymm0, %ymm8
vpsrld $16, %ymm1, %ymm9
vpsrld $16, %ymm2, %ymm10
vpsrld $16, %ymm3, %ymm11
vpand %ymm0, %ymm13, %ymm0
vpand %ymm1, %ymm13, %ymm1
vpand %ymm2, %ymm13, %ymm2
vpand %ymm3, %ymm13, %ymm3
vpslld $12, %ymm8, %ymm8
vpslld $12, %ymm9, %ymm9
vpslld $12, %ymm10, %ymm10
vpslld $12, %ymm11, %ymm11
vpor %ymm8, %ymm0, %ymm0
vpor %ymm9, %ymm1, %ymm1
vpor %ymm10, %ymm2, %ymm2
vpor %ymm11, %ymm3, %ymm3
vpsrld $16, %ymm4, %ymm8
vpsrld $16, %ymm5, %ymm9
vpsrld $16, %ymm6, %ymm10
vpsrld $16, %ymm7, %ymm11
vpand %ymm4, %ymm13, %ymm4
vpand %ymm5, %ymm13, %ymm5
vpand %ymm6, %ymm13, %ymm6
vpand %ymm7, %ymm13, %ymm7
vpslld $12, %ymm8, %ymm8
vpslld $12, %ymm9, %ymm9
vpslld $12, %ymm10, %ymm10
vpslld $12, %ymm11, %ymm11
vpor %ymm8, %ymm4, %ymm4
vpor %ymm9, %ymm5, %ymm5
vpor %ymm10, %ymm6, %ymm6
vpor %ymm11, %ymm7, %ymm7
vpshufb %ymm14, %ymm0, %ymm0
vpshufb %ymm14, %ymm1, %ymm1
vpshufb %ymm14, %ymm2, %ymm2
vpshufb %ymm14, %ymm3, %ymm3
vpshufb %ymm14, %ymm4, %ymm4
vpshufb %ymm14, %ymm5, %ymm5
vpshufb %ymm14, %ymm6, %ymm6
vpshufb %ymm14, %ymm7, %ymm7
vpermd %ymm0, %ymm15, %ymm0
vpermd %ymm1, %ymm15, %ymm1
vpermd %ymm2, %ymm15, %ymm2
vpermd %ymm3, %ymm15, %ymm3
vpermd %ymm4, %ymm15, %ymm4
vpermd %ymm5, %ymm15, %ymm5
vpermd %ymm6, %ymm15, %ymm6
vpermd %ymm7, %ymm15, %ymm7
vpermq $0x02, %ymm6, %ymm8
vpermq $0x90, %ymm7, %ymm7
vpermq $0x09, %ymm5, %ymm9
vpermq $0x40, %ymm6, %ymm6
vpermq $0x00, %ymm5, %ymm5
vpblendd $0x3f, %ymm4, %ymm5, %ymm5
vpermq $0x02, %ymm2, %ymm10
vpermq $0x90, %ymm3, %ymm4
vpermq $0x09, %ymm1, %ymm11
vpermq $0x40, %ymm2, %ymm3
vpermq $0x00, %ymm1, %ymm2
vpblendd $0x3f, %ymm0, %ymm2, %ymm2
vpblendd $0x03, %ymm8, %ymm7, %ymm7
vpblendd $0x0f, %ymm9, %ymm6, %ymm6
vpblendd $0x03, %ymm10, %ymm4, %ymm4
vpblendd $0x0f, %ymm11, %ymm3, %ymm3
vmovdqu %ymm2, 192(%rdi)
vmovdqu %ymm3, 224(%rdi)
vmovdqu %ymm4, 256(%rdi)
vmovdqu %ymm5, 288(%rdi)
vmovdqu %ymm6, 320(%rdi)
vmovdqu %ymm7, 352(%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_to_bytes_avx2,.-kyber_to_bytes_avx2
#endif /* __APPLE__ */
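# kyber_cmp_avx2: rdi, rsi = buffers to compare, edx = size in bytes.
# Constant-time comparison: XORs 32-byte chunks and ORs the differences
# into ymm2/ymm3 with no data-dependent branch on the contents, then
# returns 0 in eax when everything matched and -1 otherwise (vptest sets
# ZF only if the accumulated difference is all zero). The subl/jz checks
# below select one of three total lengths: 768, 1088, or 1536 bytes.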
#ifndef __APPLE__
.text
.globl kyber_cmp_avx2
.type kyber_cmp_avx2,@function
.align 16
kyber_cmp_avx2:
#else
.section __TEXT,__text
.globl _kyber_cmp_avx2
.p2align 4
_kyber_cmp_avx2:
#endif /* __APPLE__ */
vpxor %ymm2, %ymm2, %ymm2
vpxor %ymm3, %ymm3, %ymm3
movl $0x00, %ecx
movl $-1, %r8d
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vpxor (%rsi), %ymm0, %ymm0
vpxor 32(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 64(%rdi), %ymm0
vmovdqu 96(%rdi), %ymm1
vpxor 64(%rsi), %ymm0, %ymm0
vpxor 96(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 128(%rdi), %ymm0
vmovdqu 160(%rdi), %ymm1
vpxor 128(%rsi), %ymm0, %ymm0
vpxor 160(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 192(%rdi), %ymm0
vmovdqu 224(%rdi), %ymm1
vpxor 192(%rsi), %ymm0, %ymm0
vpxor 224(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 256(%rdi), %ymm0
vmovdqu 288(%rdi), %ymm1
vpxor 256(%rsi), %ymm0, %ymm0
vpxor 288(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 320(%rdi), %ymm0
vmovdqu 352(%rdi), %ymm1
vpxor 320(%rsi), %ymm0, %ymm0
vpxor 352(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 384(%rdi), %ymm0
vmovdqu 416(%rdi), %ymm1
vpxor 384(%rsi), %ymm0, %ymm0
vpxor 416(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 448(%rdi), %ymm0
vmovdqu 480(%rdi), %ymm1
vpxor 448(%rsi), %ymm0, %ymm0
vpxor 480(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 512(%rdi), %ymm0
vmovdqu 544(%rdi), %ymm1
vpxor 512(%rsi), %ymm0, %ymm0
vpxor 544(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 576(%rdi), %ymm0
vmovdqu 608(%rdi), %ymm1
vpxor 576(%rsi), %ymm0, %ymm0
vpxor 608(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 640(%rdi), %ymm0
vmovdqu 672(%rdi), %ymm1
vpxor 640(%rsi), %ymm0, %ymm0
vpxor 672(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 704(%rdi), %ymm0
vmovdqu 736(%rdi), %ymm1
vpxor 704(%rsi), %ymm0, %ymm0
vpxor 736(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
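# 768 bytes compared so far; stop here for the smallest size.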
subl $0x300, %edx
jz L_kyber_cmp_avx2_done
vmovdqu 768(%rdi), %ymm0
vmovdqu 800(%rdi), %ymm1
vpxor 768(%rsi), %ymm0, %ymm0
vpxor 800(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 832(%rdi), %ymm0
vmovdqu 864(%rdi), %ymm1
vpxor 832(%rsi), %ymm0, %ymm0
vpxor 864(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 896(%rdi), %ymm0
vmovdqu 928(%rdi), %ymm1
vpxor 896(%rsi), %ymm0, %ymm0
vpxor 928(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 960(%rdi), %ymm0
vmovdqu 992(%rdi), %ymm1
vpxor 960(%rsi), %ymm0, %ymm0
vpxor 992(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1024(%rdi), %ymm0
vmovdqu 1056(%rdi), %ymm1
vpxor 1024(%rsi), %ymm0, %ymm0
vpxor 1056(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
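# 1088 bytes compared; the largest size continues for another 448 bytes.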
subl $0x140, %edx
jz L_kyber_cmp_avx2_done
vmovdqu 1088(%rdi), %ymm0
vmovdqu 1120(%rdi), %ymm1
vpxor 1088(%rsi), %ymm0, %ymm0
vpxor 1120(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1152(%rdi), %ymm0
vmovdqu 1184(%rdi), %ymm1
vpxor 1152(%rsi), %ymm0, %ymm0
vpxor 1184(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1216(%rdi), %ymm0
vmovdqu 1248(%rdi), %ymm1
vpxor 1216(%rsi), %ymm0, %ymm0
vpxor 1248(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1280(%rdi), %ymm0
vmovdqu 1312(%rdi), %ymm1
vpxor 1280(%rsi), %ymm0, %ymm0
vpxor 1312(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1344(%rdi), %ymm0
vmovdqu 1376(%rdi), %ymm1
vpxor 1344(%rsi), %ymm0, %ymm0
vpxor 1376(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1408(%rdi), %ymm0
vmovdqu 1440(%rdi), %ymm1
vpxor 1408(%rsi), %ymm0, %ymm0
vpxor 1440(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
vmovdqu 1472(%rdi), %ymm0
vmovdqu 1504(%rdi), %ymm1
vpxor 1472(%rsi), %ymm0, %ymm0
vpxor 1504(%rsi), %ymm1, %ymm1
vpor %ymm0, %ymm2, %ymm2
vpor %ymm1, %ymm3, %ymm3
L_kyber_cmp_avx2_done:
vpor %ymm3, %ymm2, %ymm2
vptest %ymm2, %ymm2
cmovzl %ecx, %eax
cmovnzl %r8d, %eax
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_cmp_avx2,.-kyber_cmp_avx2
#endif /* __APPLE__ */
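# kyber_redistribute_21_rand_avx2: rdi = 4-way interleaved Keccak state,
# rsi/rdx/rcx/r8 = four 168-byte outputs (21 64-bit lanes each, the
# SHAKE-128 rate). The vpunpck{l,h}qdq + vperm2i128 pairs transpose 4x4
# blocks of 64-bit lanes from interleaved state order to per-output order;
# the final odd lane of each output is moved with a plain movq.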
#ifndef __APPLE__
.text
.globl kyber_redistribute_21_rand_avx2
.type kyber_redistribute_21_rand_avx2,@function
.align 16
kyber_redistribute_21_rand_avx2:
#else
.section __TEXT,__text
.globl _kyber_redistribute_21_rand_avx2
.p2align 4
_kyber_redistribute_21_rand_avx2:
#endif /* __APPLE__ */
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vmovdqu 192(%rdi), %ymm6
vmovdqu 224(%rdi), %ymm7
vmovdqu 256(%rdi), %ymm8
vmovdqu 288(%rdi), %ymm9
vmovdqu 320(%rdi), %ymm10
vmovdqu 352(%rdi), %ymm11
vpunpcklqdq %ymm1, %ymm0, %ymm12
vpunpckhqdq %ymm1, %ymm0, %ymm13
vpunpcklqdq %ymm3, %ymm2, %ymm14
vpunpckhqdq %ymm3, %ymm2, %ymm15
vperm2i128 $32, %ymm14, %ymm12, %ymm0
vperm2i128 $32, %ymm15, %ymm13, %ymm1
vperm2i128 $49, %ymm14, %ymm12, %ymm2
vperm2i128 $49, %ymm15, %ymm13, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm12
vpunpckhqdq %ymm5, %ymm4, %ymm13
vpunpcklqdq %ymm7, %ymm6, %ymm14
vpunpckhqdq %ymm7, %ymm6, %ymm15
vperm2i128 $32, %ymm14, %ymm12, %ymm4
vperm2i128 $32, %ymm15, %ymm13, %ymm5
vperm2i128 $49, %ymm14, %ymm12, %ymm6
vperm2i128 $49, %ymm15, %ymm13, %ymm7
vpunpcklqdq %ymm9, %ymm8, %ymm12
vpunpckhqdq %ymm9, %ymm8, %ymm13
vpunpcklqdq %ymm11, %ymm10, %ymm14
vpunpckhqdq %ymm11, %ymm10, %ymm15
vperm2i128 $32, %ymm14, %ymm12, %ymm8
vperm2i128 $32, %ymm15, %ymm13, %ymm9
vperm2i128 $49, %ymm14, %ymm12, %ymm10
vperm2i128 $49, %ymm15, %ymm13, %ymm11
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm4, 32(%rsi)
vmovdqu %ymm8, 64(%rsi)
vmovdqu %ymm1, (%rdx)
vmovdqu %ymm5, 32(%rdx)
vmovdqu %ymm9, 64(%rdx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm10, 64(%rcx)
vmovdqu %ymm3, (%r8)
vmovdqu %ymm7, 32(%r8)
vmovdqu %ymm11, 64(%r8)
vmovdqu 384(%rdi), %ymm0
vmovdqu 416(%rdi), %ymm1
vmovdqu 448(%rdi), %ymm2
vmovdqu 480(%rdi), %ymm3
vmovdqu 512(%rdi), %ymm4
vmovdqu 544(%rdi), %ymm5
vmovdqu 576(%rdi), %ymm6
vmovdqu 608(%rdi), %ymm7
movq 640(%rdi), %rax
movq 648(%rdi), %r9
movq 656(%rdi), %r10
movq 664(%rdi), %r11
vpunpcklqdq %ymm1, %ymm0, %ymm12
vpunpckhqdq %ymm1, %ymm0, %ymm13
vpunpcklqdq %ymm3, %ymm2, %ymm14
vpunpckhqdq %ymm3, %ymm2, %ymm15
vperm2i128 $32, %ymm14, %ymm12, %ymm0
vperm2i128 $32, %ymm15, %ymm13, %ymm1
vperm2i128 $49, %ymm14, %ymm12, %ymm2
vperm2i128 $49, %ymm15, %ymm13, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm12
vpunpckhqdq %ymm5, %ymm4, %ymm13
vpunpcklqdq %ymm7, %ymm6, %ymm14
vpunpckhqdq %ymm7, %ymm6, %ymm15
vperm2i128 $32, %ymm14, %ymm12, %ymm4
vperm2i128 $32, %ymm15, %ymm13, %ymm5
vperm2i128 $49, %ymm14, %ymm12, %ymm6
vperm2i128 $49, %ymm15, %ymm13, %ymm7
vmovdqu %ymm0, 96(%rsi)
vmovdqu %ymm4, 128(%rsi)
movq %rax, 160(%rsi)
vmovdqu %ymm1, 96(%rdx)
vmovdqu %ymm5, 128(%rdx)
movq %r9, 160(%rdx)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm6, 128(%rcx)
movq %r10, 160(%rcx)
vmovdqu %ymm3, 96(%r8)
vmovdqu %ymm7, 128(%r8)
movq %r11, 160(%r8)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_redistribute_21_rand_avx2,.-kyber_redistribute_21_rand_avx2
#endif /* __APPLE__ */
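# As above, for 136-byte outputs (17 lanes: the SHA3-256/SHAKE-256 rate).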
#ifndef __APPLE__
.text
.globl kyber_redistribute_17_rand_avx2
.type kyber_redistribute_17_rand_avx2,@function
.align 16
kyber_redistribute_17_rand_avx2:
#else
.section __TEXT,__text
.globl _kyber_redistribute_17_rand_avx2
.p2align 4
_kyber_redistribute_17_rand_avx2:
#endif /* __APPLE__ */
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vmovdqu 192(%rdi), %ymm6
vmovdqu 224(%rdi), %ymm7
vpunpcklqdq %ymm1, %ymm0, %ymm8
vpunpckhqdq %ymm1, %ymm0, %ymm9
vpunpcklqdq %ymm3, %ymm2, %ymm10
vpunpckhqdq %ymm3, %ymm2, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm0
vperm2i128 $32, %ymm11, %ymm9, %ymm1
vperm2i128 $49, %ymm10, %ymm8, %ymm2
vperm2i128 $49, %ymm11, %ymm9, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm8
vpunpckhqdq %ymm5, %ymm4, %ymm9
vpunpcklqdq %ymm7, %ymm6, %ymm10
vpunpckhqdq %ymm7, %ymm6, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm4
vperm2i128 $32, %ymm11, %ymm9, %ymm5
vperm2i128 $49, %ymm10, %ymm8, %ymm6
vperm2i128 $49, %ymm11, %ymm9, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm4, 32(%rsi)
vmovdqu %ymm1, (%rdx)
vmovdqu %ymm5, 32(%rdx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm3, (%r8)
vmovdqu %ymm7, 32(%r8)
vmovdqu 256(%rdi), %ymm0
vmovdqu 288(%rdi), %ymm1
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vmovdqu 448(%rdi), %ymm6
vmovdqu 480(%rdi), %ymm7
movq 512(%rdi), %rax
movq 520(%rdi), %r9
movq 528(%rdi), %r10
movq 536(%rdi), %r11
vpunpcklqdq %ymm1, %ymm0, %ymm8
vpunpckhqdq %ymm1, %ymm0, %ymm9
vpunpcklqdq %ymm3, %ymm2, %ymm10
vpunpckhqdq %ymm3, %ymm2, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm0
vperm2i128 $32, %ymm11, %ymm9, %ymm1
vperm2i128 $49, %ymm10, %ymm8, %ymm2
vperm2i128 $49, %ymm11, %ymm9, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm8
vpunpckhqdq %ymm5, %ymm4, %ymm9
vpunpcklqdq %ymm7, %ymm6, %ymm10
vpunpckhqdq %ymm7, %ymm6, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm4
vperm2i128 $32, %ymm11, %ymm9, %ymm5
vperm2i128 $49, %ymm10, %ymm8, %ymm6
vperm2i128 $49, %ymm11, %ymm9, %ymm7
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm4, 96(%rsi)
movq %rax, 128(%rsi)
vmovdqu %ymm1, 64(%rdx)
vmovdqu %ymm5, 96(%rdx)
movq %r9, 128(%rdx)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm6, 96(%rcx)
movq %r10, 128(%rcx)
vmovdqu %ymm3, 64(%r8)
vmovdqu %ymm7, 96(%r8)
movq %r11, 128(%r8)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_redistribute_17_rand_avx2,.-kyber_redistribute_17_rand_avx2
#endif /* __APPLE__ */
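# As above, for 128-byte outputs (16 lanes); no movq tail is needed since
# 16 lanes transpose evenly.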
#ifndef __APPLE__
.text
.globl kyber_redistribute_16_rand_avx2
.type kyber_redistribute_16_rand_avx2,@function
.align 16
kyber_redistribute_16_rand_avx2:
#else
.section __TEXT,__text
.globl _kyber_redistribute_16_rand_avx2
.p2align 4
_kyber_redistribute_16_rand_avx2:
#endif /* __APPLE__ */
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vmovdqu 192(%rdi), %ymm6
vmovdqu 224(%rdi), %ymm7
vpunpcklqdq %ymm1, %ymm0, %ymm8
vpunpckhqdq %ymm1, %ymm0, %ymm9
vpunpcklqdq %ymm3, %ymm2, %ymm10
vpunpckhqdq %ymm3, %ymm2, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm0
vperm2i128 $32, %ymm11, %ymm9, %ymm1
vperm2i128 $49, %ymm10, %ymm8, %ymm2
vperm2i128 $49, %ymm11, %ymm9, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm8
vpunpckhqdq %ymm5, %ymm4, %ymm9
vpunpcklqdq %ymm7, %ymm6, %ymm10
vpunpckhqdq %ymm7, %ymm6, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm4
vperm2i128 $32, %ymm11, %ymm9, %ymm5
vperm2i128 $49, %ymm10, %ymm8, %ymm6
vperm2i128 $49, %ymm11, %ymm9, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm4, 32(%rsi)
vmovdqu %ymm1, (%rdx)
vmovdqu %ymm5, 32(%rdx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm3, (%r8)
vmovdqu %ymm7, 32(%r8)
vmovdqu 256(%rdi), %ymm0
vmovdqu 288(%rdi), %ymm1
vmovdqu 320(%rdi), %ymm2
vmovdqu 352(%rdi), %ymm3
vmovdqu 384(%rdi), %ymm4
vmovdqu 416(%rdi), %ymm5
vmovdqu 448(%rdi), %ymm6
vmovdqu 480(%rdi), %ymm7
vpunpcklqdq %ymm1, %ymm0, %ymm8
vpunpckhqdq %ymm1, %ymm0, %ymm9
vpunpcklqdq %ymm3, %ymm2, %ymm10
vpunpckhqdq %ymm3, %ymm2, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm0
vperm2i128 $32, %ymm11, %ymm9, %ymm1
vperm2i128 $49, %ymm10, %ymm8, %ymm2
vperm2i128 $49, %ymm11, %ymm9, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm8
vpunpckhqdq %ymm5, %ymm4, %ymm9
vpunpcklqdq %ymm7, %ymm6, %ymm10
vpunpckhqdq %ymm7, %ymm6, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm4
vperm2i128 $32, %ymm11, %ymm9, %ymm5
vperm2i128 $49, %ymm10, %ymm8, %ymm6
vperm2i128 $49, %ymm11, %ymm9, %ymm7
vmovdqu %ymm0, 64(%rsi)
vmovdqu %ymm4, 96(%rsi)
vmovdqu %ymm1, 64(%rdx)
vmovdqu %ymm5, 96(%rdx)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm6, 96(%rcx)
vmovdqu %ymm3, 64(%r8)
vmovdqu %ymm7, 96(%r8)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_redistribute_16_rand_avx2,.-kyber_redistribute_16_rand_avx2
#endif /* __APPLE__ */
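# As above, for 64-byte outputs (8 lanes each).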
#ifndef __APPLE__
.text
.globl kyber_redistribute_8_rand_avx2
.type kyber_redistribute_8_rand_avx2,@function
.align 16
kyber_redistribute_8_rand_avx2:
#else
.section __TEXT,__text
.globl _kyber_redistribute_8_rand_avx2
.p2align 4
_kyber_redistribute_8_rand_avx2:
#endif /* __APPLE__ */
vmovdqu (%rdi), %ymm0
vmovdqu 32(%rdi), %ymm1
vmovdqu 64(%rdi), %ymm2
vmovdqu 96(%rdi), %ymm3
vmovdqu 128(%rdi), %ymm4
vmovdqu 160(%rdi), %ymm5
vmovdqu 192(%rdi), %ymm6
vmovdqu 224(%rdi), %ymm7
vpunpcklqdq %ymm1, %ymm0, %ymm8
vpunpckhqdq %ymm1, %ymm0, %ymm9
vpunpcklqdq %ymm3, %ymm2, %ymm10
vpunpckhqdq %ymm3, %ymm2, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm0
vperm2i128 $32, %ymm11, %ymm9, %ymm1
vperm2i128 $49, %ymm10, %ymm8, %ymm2
vperm2i128 $49, %ymm11, %ymm9, %ymm3
vpunpcklqdq %ymm5, %ymm4, %ymm8
vpunpckhqdq %ymm5, %ymm4, %ymm9
vpunpcklqdq %ymm7, %ymm6, %ymm10
vpunpckhqdq %ymm7, %ymm6, %ymm11
vperm2i128 $32, %ymm10, %ymm8, %ymm4
vperm2i128 $32, %ymm11, %ymm9, %ymm5
vperm2i128 $49, %ymm10, %ymm8, %ymm6
vperm2i128 $49, %ymm11, %ymm9, %ymm7
vmovdqu %ymm0, (%rsi)
vmovdqu %ymm4, 32(%rsi)
vmovdqu %ymm1, (%rdx)
vmovdqu %ymm5, 32(%rdx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm3, (%r8)
vmovdqu %ymm7, 32(%r8)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_redistribute_8_rand_avx2,.-kyber_redistribute_8_rand_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 16
#else
.p2align 4
#endif /* __APPLE__ */
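# The 24 Keccak-f[1600] round constants, each repeated four times so a
# single 32-byte load XORs the constant into all four parallel states.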
L_sha3_parallel_4_r:
.quad 0x1,0x1
.quad 0x1,0x1
.quad 0x8082,0x8082
.quad 0x8082,0x8082
.quad 0x800000000000808a,0x800000000000808a
.quad 0x800000000000808a,0x800000000000808a
.quad 0x8000000080008000,0x8000000080008000
.quad 0x8000000080008000,0x8000000080008000
.quad 0x808b,0x808b
.quad 0x808b,0x808b
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8000000000008009,0x8000000000008009
.quad 0x8a,0x8a
.quad 0x8a,0x8a
.quad 0x88,0x88
.quad 0x88,0x88
.quad 0x80008009,0x80008009
.quad 0x80008009,0x80008009
.quad 0x8000000a,0x8000000a
.quad 0x8000000a,0x8000000a
.quad 0x8000808b,0x8000808b
.quad 0x8000808b,0x8000808b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x800000000000008b,0x800000000000008b
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008089,0x8000000000008089
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008003,0x8000000000008003
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000008002,0x8000000000008002
.quad 0x8000000000000080,0x8000000000000080
.quad 0x8000000000000080,0x8000000000000080
.quad 0x800a,0x800a
.quad 0x800a,0x800a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x800000008000000a,0x800000008000000a
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000080008081,0x8000000080008081
.quad 0x8000000000008080,0x8000000000008080
.quad 0x8000000000008080,0x8000000000008080
.quad 0x80000001,0x80000001
.quad 0x80000001,0x80000001
.quad 0x8000000080008008,0x8000000080008008
.quad 0x8000000080008008,0x8000000080008008
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
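# Keccak padding: the 0x80 final-pad bit in the top byte of the last lane
# of the SHAKE-128 rate, replicated for the four parallel states.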
L_sha3_128_blockx4_seed_avx2_end_mark:
.quad 0x8000000000000000, 0x8000000000000000
.quad 0x8000000000000000, 0x8000000000000000
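# kyber_sha3_128_blocksx4_seed_avx2: rdi = 4-way interleaved state (25
# words x 4 lanes), rsi = common 32-byte seed. The init broadcasts the
# four seed qwords into state words 0-3, keeps word 4 as loaded (it
# presumably already holds the per-state index bytes and domain padding
# set by the caller), zeroes words 5-19 and 21-24, and puts the
# end-of-rate mark in word 20; all 24 Keccak-f[1600] rounds then follow
# inline. rdi is advanced and rax/rcx hold further offsets so every state
# word sits within a short signed displacement. Round 0's column-parity
# step is specialized: with most words known to be zero it reduces to
# word0 ^ word20 plus words 1-4 directly.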
#ifndef __APPLE__
.text
.globl kyber_sha3_128_blocksx4_seed_avx2
.type kyber_sha3_128_blocksx4_seed_avx2,@function
.align 16
kyber_sha3_128_blocksx4_seed_avx2:
#else
.section __TEXT,__text
.globl _kyber_sha3_128_blocksx4_seed_avx2
.p2align 4
_kyber_sha3_128_blocksx4_seed_avx2:
#endif /* __APPLE__ */
leaq L_sha3_parallel_4_r(%rip), %rdx
movq %rdi, %rax
movq %rdi, %rcx
vpbroadcastq (%rsi), %ymm15
addq $0x80, %rdi
vpbroadcastq 8(%rsi), %ymm11
addq $0x180, %rax
vpbroadcastq 16(%rsi), %ymm12
addq $0x280, %rcx
vpbroadcastq 24(%rsi), %ymm13
vmovdqu L_sha3_128_blockx4_seed_avx2_end_mark(%rip), %ymm5
vpxor %ymm6, %ymm6, %ymm6
vmovdqu %ymm11, -96(%rdi)
vmovdqu %ymm12, -64(%rdi)
vmovdqu %ymm13, -32(%rdi)
vmovdqu (%rdi), %ymm14
vmovdqu %ymm6, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm6, 96(%rdi)
vmovdqu %ymm6, 128(%rdi)
vmovdqu %ymm6, -96(%rax)
vmovdqu %ymm6, -64(%rax)
vmovdqu %ymm6, -32(%rax)
vmovdqu %ymm6, (%rax)
vmovdqu %ymm6, 32(%rax)
vmovdqu %ymm6, 64(%rax)
vmovdqu %ymm6, 96(%rax)
vmovdqu %ymm6, 128(%rax)
vmovdqu %ymm6, -96(%rcx)
vmovdqu %ymm6, -64(%rcx)
vmovdqu %ymm6, -32(%rcx)
vmovdqu %ymm5, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm6, 64(%rcx)
vmovdqu %ymm6, 96(%rcx)
vmovdqu %ymm6, 128(%rcx)
vpxor %ymm5, %ymm15, %ymm10
# Round 0
# Calc b[0..4]
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rdi), %ymm6, %ymm11
vpxor (%rax), %ymm7, %ymm12
vpxor -64(%rcx), %ymm8, %ymm13
vpxor 128(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor (%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rcx)
# Row 1
vpxor -32(%rdi), %ymm8, %ymm10
vpxor -96(%rax), %ymm9, %ymm11
vpxor -64(%rax), %ymm5, %ymm12
vpxor 128(%rax), %ymm6, %ymm13
vpxor 64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 2
vpxor -96(%rdi), %ymm6, %ymm10
vpxor 96(%rdi), %ymm7, %ymm11
vpxor 32(%rax), %ymm8, %ymm12
vpxor -32(%rcx), %ymm9, %ymm13
vpxor (%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rcx)
# Row 3
vpxor (%rdi), %ymm9, %ymm10
vpxor 32(%rdi), %ymm5, %ymm11
vpxor -32(%rax), %ymm6, %ymm12
vpxor -96(%rcx), %ymm7, %ymm13
vpxor 96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 4
vpxor -64(%rdi), %ymm7, %ymm10
vpxor 128(%rdi), %ymm8, %ymm11
vpxor 64(%rax), %ymm9, %ymm12
vpxor 96(%rax), %ymm5, %ymm13
vpxor 32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 32(%rcx)
# Round 1
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm2, %ymm12
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm3, %ymm13
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm4, %ymm14
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rax), %ymm6, %ymm11
vpxor 32(%rax), %ymm7, %ymm12
vpxor -96(%rcx), %ymm8, %ymm13
vpxor 32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 32(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 1
vpxor -64(%rcx), %ymm8, %ymm10
vpxor 64(%rcx), %ymm9, %ymm11
vpxor -96(%rdi), %ymm5, %ymm12
vpxor 32(%rdi), %ymm6, %ymm13
vpxor 64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 64(%rax)
# Row 2
vpxor 64(%rdi), %ymm6, %ymm10
vpxor -64(%rax), %ymm7, %ymm11
vpxor -32(%rcx), %ymm8, %ymm12
vpxor 96(%rcx), %ymm9, %ymm13
vpxor -64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 3
vpxor 128(%rcx), %ymm9, %ymm10
vpxor -32(%rdi), %ymm5, %ymm11
vpxor 96(%rdi), %ymm6, %ymm12
vpxor -32(%rax), %ymm7, %ymm13
vpxor 96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 96(%rax)
# Row 4
vpxor (%rax), %ymm7, %ymm10
vpxor 128(%rax), %ymm8, %ymm11
vpxor (%rcx), %ymm9, %ymm12
vpxor (%rdi), %ymm5, %ymm13
vpxor 128(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 128(%rdi)
# Round 2
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rcx), %ymm6, %ymm11
vpxor -32(%rcx), %ymm7, %ymm12
vpxor -32(%rax), %ymm8, %ymm13
vpxor 128(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 64(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 1
vpxor -96(%rcx), %ymm8, %ymm10
vpxor 64(%rax), %ymm9, %ymm11
vpxor 64(%rdi), %ymm5, %ymm12
vpxor -32(%rdi), %ymm6, %ymm13
vpxor (%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 2
vpxor -96(%rax), %ymm6, %ymm10
vpxor -96(%rdi), %ymm7, %ymm11
vpxor 96(%rcx), %ymm8, %ymm12
vpxor 96(%rax), %ymm9, %ymm13
vpxor (%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rax)
# Row 3
vpxor 32(%rcx), %ymm9, %ymm10
vpxor -64(%rcx), %ymm5, %ymm11
vpxor -64(%rax), %ymm6, %ymm12
vpxor 96(%rdi), %ymm7, %ymm13
vpxor (%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 4
vpxor 32(%rax), %ymm7, %ymm10
vpxor 32(%rdi), %ymm8, %ymm11
vpxor -64(%rdi), %ymm9, %ymm12
vpxor 128(%rcx), %ymm5, %ymm13
vpxor 128(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 128(%rax)
# Round 3
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm2, %ymm12
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rax), %ymm6, %ymm11
vpxor 96(%rcx), %ymm7, %ymm12
vpxor 96(%rdi), %ymm8, %ymm13
vpxor 128(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 96(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 128(%rax)
# Row 1
vpxor -32(%rax), %ymm8, %ymm10
vpxor (%rcx), %ymm9, %ymm11
vpxor -96(%rax), %ymm5, %ymm12
vpxor -64(%rcx), %ymm6, %ymm13
vpxor -64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 2
vpxor 64(%rcx), %ymm6, %ymm10
vpxor 64(%rdi), %ymm7, %ymm11
vpxor 96(%rax), %ymm8, %ymm12
vpxor (%rdi), %ymm9, %ymm13
vpxor 32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 3
vpxor 128(%rdi), %ymm9, %ymm10
vpxor -96(%rcx), %ymm5, %ymm11
vpxor -96(%rdi), %ymm6, %ymm12
vpxor -64(%rax), %ymm7, %ymm13
vpxor 128(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 4
vpxor -32(%rcx), %ymm7, %ymm10
vpxor -32(%rdi), %ymm8, %ymm11
vpxor (%rax), %ymm9, %ymm12
vpxor 32(%rcx), %ymm5, %ymm13
vpxor 32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Round 4
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm1, %ymm11
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm11, %ymm11
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
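# Each of the five row blocks applies the rest of the round in one pass:
# theta (XOR t[x] into each lane), rho (every vpsrlq/vpsllq/vpor triple
# is a ROTL64 by the per-lane offset: 0/44/43/21/14 for row 0, then
# 28/20/3/45/61, 1/6/25/8/18, 27/36/10/15/56 and 62/55/39/41/2 -- the
# Keccak-f[1600] rho table), pi (the scattered load offsets gather the
# five lanes that pi maps into this row; results are stored back to the
# same slots, so the permutation lives in the addressing rather than in
# data movement), and chi:
#   out[x] = in[x] ^ (~in[x+1] & in[x+2])   (indices mod 5)
# via vpandn/vpxor (AT&T "vpandn %B, %A, %D" computes D = ~A & B).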
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rcx), %ymm6, %ymm11
vpxor 96(%rax), %ymm7, %ymm12
vpxor -64(%rax), %ymm8, %ymm13
vpxor 32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
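# iota: the round constant is folded into lane [0][0] only.  The table
# at (%rdx) appears to hold each 64-bit constant replicated four times
# (one copy per interleaved state), 32 bytes per round -- hence
# 128(%rdx) here and 160(%rdx) in the next round.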
vpxor 128(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 1
vpxor 96(%rdi), %ymm8, %ymm10
vpxor -64(%rdi), %ymm9, %ymm11
vpxor 64(%rcx), %ymm5, %ymm12
vpxor -96(%rcx), %ymm6, %ymm13
vpxor (%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, (%rax)
# Row 2
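# (This row's first rho offset is 1, so the 1-bit left shift is done
# with vpaddq of the register with itself instead of vpsllq $1 -- the
# same trick as in the theta step above.)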
vpxor 64(%rax), %ymm6, %ymm10
vpxor -96(%rax), %ymm7, %ymm11
vpxor (%rdi), %ymm8, %ymm12
vpxor 128(%rcx), %ymm9, %ymm13
vpxor -32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 3
vpxor 128(%rax), %ymm9, %ymm10
vpxor -32(%rax), %ymm5, %ymm11
vpxor 64(%rdi), %ymm6, %ymm12
vpxor -96(%rdi), %ymm7, %ymm13
vpxor 32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 4
vpxor 96(%rcx), %ymm7, %ymm10
vpxor -64(%rcx), %ymm8, %ymm11
vpxor 32(%rax), %ymm9, %ymm12
vpxor 128(%rdi), %ymm5, %ymm13
vpxor -32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rdi)
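# Rounds 5 onward repeat the same theta / rho / pi / chi / iota pattern
# annotated in round 4; only the memory offsets change, since each round
# leaves its output in pi-permuted slots that the next round's loads
# compensate for.  Further comments are limited to round-specific
# details.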
# Round 5
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rdi), %ymm6, %ymm11
vpxor (%rdi), %ymm7, %ymm12
vpxor -96(%rdi), %ymm8, %ymm13
vpxor -32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 160(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 1
vpxor -64(%rax), %ymm8, %ymm10
vpxor (%rax), %ymm9, %ymm11
vpxor 64(%rax), %ymm5, %ymm12
vpxor -32(%rax), %ymm6, %ymm13
vpxor 32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rax)
# Row 2
vpxor (%rcx), %ymm6, %ymm10
vpxor 64(%rcx), %ymm7, %ymm11
vpxor 128(%rcx), %ymm8, %ymm12
vpxor 32(%rcx), %ymm9, %ymm13
vpxor 96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 3
vpxor 32(%rdi), %ymm9, %ymm10
vpxor 96(%rdi), %ymm5, %ymm11
vpxor -96(%rax), %ymm6, %ymm12
vpxor 64(%rdi), %ymm7, %ymm13
vpxor 128(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 128(%rdi)
# Row 4
vpxor 96(%rax), %ymm7, %ymm10
vpxor -96(%rcx), %ymm8, %ymm11
vpxor -32(%rcx), %ymm9, %ymm12
vpxor 128(%rax), %ymm5, %ymm13
vpxor -64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rcx)
# Round 6
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rax), %ymm6, %ymm11
vpxor 128(%rcx), %ymm7, %ymm12
vpxor 64(%rdi), %ymm8, %ymm13
vpxor -64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 192(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -64(%rcx)
# Row 1
vpxor -96(%rdi), %ymm8, %ymm10
vpxor 32(%rax), %ymm9, %ymm11
vpxor (%rcx), %ymm5, %ymm12
vpxor 96(%rdi), %ymm6, %ymm13
vpxor -32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 2
vpxor -64(%rdi), %ymm6, %ymm10
vpxor 64(%rax), %ymm7, %ymm11
vpxor 32(%rcx), %ymm8, %ymm12
vpxor 128(%rdi), %ymm9, %ymm13
vpxor 96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 3
vpxor -32(%rdi), %ymm9, %ymm10
vpxor -64(%rax), %ymm5, %ymm11
vpxor 64(%rcx), %ymm6, %ymm12
vpxor -96(%rax), %ymm7, %ymm13
vpxor 128(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rax)
# Row 4
vpxor (%rdi), %ymm7, %ymm10
vpxor -32(%rax), %ymm8, %ymm11
vpxor 96(%rcx), %ymm9, %ymm12
vpxor 32(%rdi), %ymm5, %ymm13
vpxor -96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Round 7
# Calc b[0..4]
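# (Same five column parities as in earlier rounds; in this round the
# XOR chain for each column is finished before the next one starts,
# rather than interleaving the five accumulators.)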
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm3, %ymm13
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm1, %ymm11
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm4, %ymm14
vpxor 128(%rax), %ymm14, %ymm14
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm2, %ymm12
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rax), %ymm6, %ymm11
vpxor 32(%rcx), %ymm7, %ymm12
vpxor -96(%rax), %ymm8, %ymm13
vpxor -96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 224(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, -96(%rcx)
# Row 1
vpxor 64(%rdi), %ymm8, %ymm10
vpxor -32(%rcx), %ymm9, %ymm11
vpxor -64(%rdi), %ymm5, %ymm12
vpxor -64(%rax), %ymm6, %ymm13
vpxor 96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 2
vpxor (%rax), %ymm6, %ymm10
vpxor (%rcx), %ymm7, %ymm11
vpxor 128(%rdi), %ymm8, %ymm12
vpxor 128(%rax), %ymm9, %ymm13
vpxor (%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, (%rdi)
# Row 3
vpxor -64(%rcx), %ymm9, %ymm10
vpxor -96(%rdi), %ymm5, %ymm11
vpxor 64(%rax), %ymm6, %ymm12
vpxor 64(%rcx), %ymm7, %ymm13
vpxor 32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Row 4
vpxor 128(%rcx), %ymm7, %ymm10
vpxor 96(%rdi), %ymm8, %ymm11
vpxor 96(%rax), %ymm9, %ymm12
vpxor -32(%rdi), %ymm5, %ymm13
vpxor -32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, -32(%rax)
# Round 8
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm3, %ymm13
vpxor -64(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rcx), %ymm6, %ymm11
vpxor 128(%rdi), %ymm7, %ymm12
vpxor 64(%rcx), %ymm8, %ymm13
vpxor -32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 256(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 1
vpxor -96(%rax), %ymm8, %ymm10
vpxor 96(%rcx), %ymm9, %ymm11
vpxor (%rax), %ymm5, %ymm12
vpxor -96(%rdi), %ymm6, %ymm13
vpxor 96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 2
vpxor 32(%rax), %ymm6, %ymm10
vpxor -64(%rdi), %ymm7, %ymm11
vpxor 128(%rax), %ymm8, %ymm12
vpxor 32(%rdi), %ymm9, %ymm13
vpxor 128(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 128(%rcx)
# Row 3
vpxor -96(%rcx), %ymm9, %ymm10
vpxor 64(%rdi), %ymm5, %ymm11
vpxor (%rcx), %ymm6, %ymm12
vpxor 64(%rax), %ymm7, %ymm13
vpxor -32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 4
vpxor 32(%rcx), %ymm7, %ymm10
vpxor -64(%rax), %ymm8, %ymm11
vpxor (%rdi), %ymm9, %ymm12
vpxor -64(%rcx), %ymm5, %ymm13
vpxor 96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 96(%rdi)
# Round 9
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm2, %ymm12
vpxor -96(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rcx), %ymm6, %ymm11
vpxor 128(%rax), %ymm7, %ymm12
vpxor 64(%rax), %ymm8, %ymm13
vpxor 96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 288(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 1
vpxor 64(%rcx), %ymm8, %ymm10
vpxor 96(%rax), %ymm9, %ymm11
vpxor 32(%rax), %ymm5, %ymm12
vpxor 64(%rdi), %ymm6, %ymm13
vpxor (%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 2
vpxor -32(%rcx), %ymm6, %ymm10
vpxor (%rax), %ymm7, %ymm11
vpxor 32(%rdi), %ymm8, %ymm12
vpxor -32(%rdi), %ymm9, %ymm13
vpxor 32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 3
vpxor -32(%rax), %ymm9, %ymm10
vpxor -96(%rax), %ymm5, %ymm11
vpxor -64(%rdi), %ymm6, %ymm12
vpxor (%rcx), %ymm7, %ymm13
vpxor -64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rcx)
# Row 4
vpxor 128(%rdi), %ymm7, %ymm10
vpxor -96(%rdi), %ymm8, %ymm11
vpxor 128(%rcx), %ymm9, %ymm12
vpxor -96(%rcx), %ymm5, %ymm13
vpxor -64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, -64(%rax)
# Round 10
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm1, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm12, %ymm12
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rax), %ymm6, %ymm11
vpxor 32(%rdi), %ymm7, %ymm12
vpxor (%rcx), %ymm8, %ymm13
vpxor -64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 320(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 1
vpxor 64(%rax), %ymm8, %ymm10
vpxor (%rdi), %ymm9, %ymm11
vpxor -32(%rcx), %ymm5, %ymm12
vpxor -96(%rax), %ymm6, %ymm13
vpxor 128(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 2
vpxor 96(%rcx), %ymm6, %ymm10
vpxor 32(%rax), %ymm7, %ymm11
vpxor -32(%rdi), %ymm8, %ymm12
vpxor -64(%rcx), %ymm9, %ymm13
vpxor 128(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rdi)
# Row 3
vpxor 96(%rdi), %ymm9, %ymm10
vpxor 64(%rcx), %ymm5, %ymm11
vpxor (%rax), %ymm6, %ymm12
vpxor -64(%rdi), %ymm7, %ymm13
vpxor -96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 4
vpxor 128(%rax), %ymm7, %ymm10
vpxor 64(%rdi), %ymm8, %ymm11
vpxor 32(%rcx), %ymm9, %ymm12
vpxor -32(%rax), %ymm5, %ymm13
vpxor -96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, -96(%rdi)
# Round 11
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm4, %ymm14
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rdi), %ymm6, %ymm11
vpxor -32(%rdi), %ymm7, %ymm12
vpxor -64(%rdi), %ymm8, %ymm13
vpxor -96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 352(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rdi)
# Row 1
vpxor (%rcx), %ymm8, %ymm10
vpxor 128(%rcx), %ymm9, %ymm11
vpxor 96(%rcx), %ymm5, %ymm12
vpxor 64(%rcx), %ymm6, %ymm13
vpxor 32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 2
vpxor 96(%rax), %ymm6, %ymm10
vpxor -32(%rcx), %ymm7, %ymm11
vpxor -64(%rcx), %ymm8, %ymm12
vpxor -96(%rcx), %ymm9, %ymm13
vpxor 128(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 3
vpxor -64(%rax), %ymm9, %ymm10
vpxor 64(%rax), %ymm5, %ymm11
vpxor 32(%rax), %ymm6, %ymm12
vpxor (%rax), %ymm7, %ymm13
vpxor -32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rax)
# Row 4
vpxor 32(%rdi), %ymm7, %ymm10
vpxor -96(%rax), %ymm8, %ymm11
vpxor 128(%rdi), %ymm9, %ymm12
vpxor 96(%rdi), %ymm5, %ymm13
vpxor 64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 64(%rdi)
# Round 12
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rcx), %ymm6, %ymm11
vpxor -64(%rcx), %ymm7, %ymm12
vpxor (%rax), %ymm8, %ymm13
vpxor 64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 384(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 1
vpxor -64(%rdi), %ymm8, %ymm10
vpxor 32(%rcx), %ymm9, %ymm11
vpxor 96(%rax), %ymm5, %ymm12
vpxor 64(%rax), %ymm6, %ymm13
vpxor 128(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 2
vpxor (%rdi), %ymm6, %ymm10
vpxor 96(%rcx), %ymm7, %ymm11
vpxor -96(%rcx), %ymm8, %ymm12
vpxor -32(%rax), %ymm9, %ymm13
vpxor 32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 3
vpxor -96(%rdi), %ymm9, %ymm10
vpxor (%rcx), %ymm5, %ymm11
vpxor -32(%rcx), %ymm6, %ymm12
vpxor 32(%rax), %ymm7, %ymm13
vpxor 96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 4
vpxor -32(%rdi), %ymm7, %ymm10
vpxor 64(%rcx), %ymm8, %ymm11
vpxor 128(%rax), %ymm9, %ymm12
vpxor -64(%rax), %ymm5, %ymm13
vpxor -96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -96(%rax)
# Round 13
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -32(%rax), %ymm3, %ymm13
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm2, %ymm12
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm1, %ymm11
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rcx), %ymm6, %ymm11
vpxor -96(%rcx), %ymm7, %ymm12
vpxor 32(%rax), %ymm8, %ymm13
vpxor -96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 416(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -96(%rax)
# Row 1
vpxor (%rax), %ymm8, %ymm10
vpxor 128(%rdi), %ymm9, %ymm11
vpxor (%rdi), %ymm5, %ymm12
vpxor (%rcx), %ymm6, %ymm13
vpxor 128(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 2
vpxor 128(%rcx), %ymm6, %ymm10
vpxor 96(%rax), %ymm7, %ymm11
vpxor -32(%rax), %ymm8, %ymm12
vpxor 96(%rdi), %ymm9, %ymm13
vpxor -32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 3
vpxor 64(%rdi), %ymm9, %ymm10
vpxor -64(%rdi), %ymm5, %ymm11
vpxor 96(%rcx), %ymm6, %ymm12
vpxor -32(%rcx), %ymm7, %ymm13
vpxor -64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 4
vpxor -64(%rcx), %ymm7, %ymm10
vpxor 64(%rax), %ymm8, %ymm11
vpxor 32(%rdi), %ymm9, %ymm12
vpxor -96(%rdi), %ymm5, %ymm13
vpxor 64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 64(%rcx)
# Round 14
# Calc b[0..4]
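# State layout, as far as this excerpt shows: the 24 in-memory lanes
# live in 32-byte slots at offsets -96..128 from each of %rdi, %rax and
# %rcx (8 slots per base, keeping every access within a signed 8-bit
# displacement), with the 25th lane held in ymm15. Each slot appears to
# hold the same lane of four independent Keccak states, which is why
# every operation here is a 256-bit ymm op.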
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm3, %ymm13
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rdi), %ymm6, %ymm11
vpxor -32(%rax), %ymm7, %ymm12
vpxor -32(%rcx), %ymm8, %ymm13
vpxor 64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 448(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 1
vpxor 32(%rax), %ymm8, %ymm10
vpxor 128(%rax), %ymm9, %ymm11
vpxor 128(%rcx), %ymm5, %ymm12
vpxor -64(%rdi), %ymm6, %ymm13
vpxor 32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, 32(%rdi)
# Row 2
vpxor 32(%rcx), %ymm6, %ymm10
vpxor (%rdi), %ymm7, %ymm11
vpxor 96(%rdi), %ymm8, %ymm12
vpxor -64(%rax), %ymm9, %ymm13
vpxor -64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 3
vpxor -96(%rax), %ymm9, %ymm10
vpxor (%rax), %ymm5, %ymm11
vpxor 96(%rax), %ymm6, %ymm12
vpxor 96(%rcx), %ymm7, %ymm13
vpxor -96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 4
vpxor -96(%rcx), %ymm7, %ymm10
vpxor (%rcx), %ymm8, %ymm11
vpxor -32(%rdi), %ymm9, %ymm12
vpxor 64(%rdi), %ymm5, %ymm13
vpxor 64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 64(%rax)
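# Because pi is folded into the addressing, the slot a logical lane
# occupies changes from one unrolled round to the next (compare the
# Row 0 offsets of this round with the previous one); each round still
# updates every slot in place.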
# Round 15
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm2, %ymm12
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm11, %ymm11
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rax), %ymm6, %ymm11
vpxor 96(%rdi), %ymm7, %ymm12
vpxor 96(%rcx), %ymm8, %ymm13
vpxor 64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 480(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 1
vpxor -32(%rcx), %ymm8, %ymm10
vpxor 32(%rdi), %ymm9, %ymm11
vpxor 32(%rcx), %ymm5, %ymm12
vpxor (%rax), %ymm6, %ymm13
vpxor -32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 2
vpxor 128(%rdi), %ymm6, %ymm10
vpxor 128(%rcx), %ymm7, %ymm11
vpxor -64(%rax), %ymm8, %ymm12
vpxor -96(%rdi), %ymm9, %ymm13
vpxor -96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 3
vpxor 64(%rcx), %ymm9, %ymm10
vpxor 32(%rax), %ymm5, %ymm11
vpxor (%rdi), %ymm6, %ymm12
vpxor 96(%rax), %ymm7, %ymm13
vpxor 64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 4
vpxor -32(%rax), %ymm7, %ymm10
vpxor -64(%rdi), %ymm8, %ymm11
vpxor -64(%rcx), %ymm9, %ymm12
vpxor -96(%rax), %ymm5, %ymm13
vpxor (%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, (%rcx)
# Round 16
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rdi), %ymm6, %ymm11
vpxor -64(%rax), %ymm7, %ymm12
vpxor 96(%rax), %ymm8, %ymm13
vpxor (%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 512(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rcx)
# Row 1
vpxor 96(%rcx), %ymm8, %ymm10
vpxor -32(%rdi), %ymm9, %ymm11
vpxor 128(%rdi), %ymm5, %ymm12
vpxor 32(%rax), %ymm6, %ymm13
vpxor -64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 2
vpxor 128(%rax), %ymm6, %ymm10
vpxor 32(%rcx), %ymm7, %ymm11
vpxor -96(%rdi), %ymm8, %ymm12
vpxor 64(%rdi), %ymm9, %ymm13
vpxor -32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -32(%rax)
# Row 3
vpxor 64(%rax), %ymm9, %ymm10
vpxor -32(%rcx), %ymm5, %ymm11
vpxor 128(%rcx), %ymm6, %ymm12
vpxor (%rdi), %ymm7, %ymm13
vpxor -96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 4
vpxor 96(%rdi), %ymm7, %ymm10
vpxor (%rax), %ymm8, %ymm11
vpxor -96(%rcx), %ymm9, %ymm12
vpxor 64(%rcx), %ymm5, %ymm13
vpxor -64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Round 17
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm4, %ymm14
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm14, %ymm14
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm10, %ymm10
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rdi), %ymm6, %ymm11
vpxor -96(%rdi), %ymm7, %ymm12
vpxor (%rdi), %ymm8, %ymm13
vpxor -64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 544(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rdi)
# Row 1
vpxor 96(%rax), %ymm8, %ymm10
vpxor -64(%rcx), %ymm9, %ymm11
vpxor 128(%rax), %ymm5, %ymm12
vpxor -32(%rcx), %ymm6, %ymm13
vpxor -96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -96(%rcx)
# Row 2
vpxor 32(%rdi), %ymm6, %ymm10
vpxor 128(%rdi), %ymm7, %ymm11
vpxor 64(%rdi), %ymm8, %ymm12
vpxor -96(%rax), %ymm9, %ymm13
vpxor 96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 3
vpxor (%rcx), %ymm9, %ymm10
vpxor 96(%rcx), %ymm5, %ymm11
vpxor 32(%rcx), %ymm6, %ymm12
vpxor 128(%rcx), %ymm7, %ymm13
vpxor 64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 4
vpxor -64(%rax), %ymm7, %ymm10
vpxor 32(%rax), %ymm8, %ymm11
vpxor -32(%rax), %ymm9, %ymm12
vpxor 64(%rax), %ymm5, %ymm13
vpxor (%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, (%rax)
# Round 18
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rcx), %ymm6, %ymm11
vpxor 64(%rdi), %ymm7, %ymm12
vpxor 128(%rcx), %ymm8, %ymm13
vpxor (%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 576(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, (%rax)
# Row 1
vpxor (%rdi), %ymm8, %ymm10
vpxor -96(%rcx), %ymm9, %ymm11
vpxor 32(%rdi), %ymm5, %ymm12
vpxor 96(%rcx), %ymm6, %ymm13
vpxor -32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 2
vpxor -32(%rdi), %ymm6, %ymm10
vpxor 128(%rax), %ymm7, %ymm11
vpxor -96(%rax), %ymm8, %ymm12
vpxor 64(%rcx), %ymm9, %ymm13
vpxor -64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 3
vpxor -64(%rdi), %ymm9, %ymm10
vpxor 96(%rax), %ymm5, %ymm11
vpxor 128(%rdi), %ymm6, %ymm12
vpxor 32(%rcx), %ymm7, %ymm13
vpxor 64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 4
vpxor -96(%rdi), %ymm7, %ymm10
vpxor -32(%rcx), %ymm8, %ymm11
vpxor 96(%rdi), %ymm9, %ymm12
vpxor (%rcx), %ymm5, %ymm13
vpxor 32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 32(%rax)
# Round 19
# Calc b[0..4]
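# The XOR order in this parity computation varies from round to round;
# only the grouping differs (each column is still the XOR of its five
# lanes), presumably to shorten dependency chains for the out-of-order
# scheduler.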
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm4, %ymm14
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm1, %ymm11
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm3, %ymm13
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rcx), %ymm6, %ymm11
vpxor -96(%rax), %ymm7, %ymm12
vpxor 32(%rcx), %ymm8, %ymm13
vpxor 32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 608(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rax)
# Row 1
vpxor 128(%rcx), %ymm8, %ymm10
vpxor -32(%rax), %ymm9, %ymm11
vpxor -32(%rdi), %ymm5, %ymm12
vpxor 96(%rax), %ymm6, %ymm13
vpxor 96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 2
vpxor -64(%rcx), %ymm6, %ymm10
vpxor 32(%rdi), %ymm7, %ymm11
vpxor 64(%rcx), %ymm8, %ymm12
vpxor 64(%rax), %ymm9, %ymm13
vpxor -96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -96(%rdi)
# Row 3
vpxor (%rax), %ymm9, %ymm10
vpxor (%rdi), %ymm5, %ymm11
vpxor 128(%rax), %ymm6, %ymm12
vpxor 128(%rdi), %ymm7, %ymm13
vpxor (%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 4
vpxor 64(%rdi), %ymm7, %ymm10
vpxor 96(%rcx), %ymm8, %ymm11
vpxor -64(%rax), %ymm9, %ymm12
vpxor -64(%rdi), %ymm5, %ymm13
vpxor -32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Round 20
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm3, %ymm13
vpxor -96(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rax), %ymm6, %ymm11
vpxor 64(%rcx), %ymm7, %ymm12
vpxor 128(%rdi), %ymm8, %ymm13
vpxor -32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 640(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 1
vpxor 32(%rcx), %ymm8, %ymm10
vpxor 96(%rdi), %ymm9, %ymm11
vpxor -64(%rcx), %ymm5, %ymm12
vpxor (%rdi), %ymm6, %ymm13
vpxor -64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rax)
# Row 2
vpxor -96(%rcx), %ymm6, %ymm10
vpxor -32(%rdi), %ymm7, %ymm11
vpxor 64(%rax), %ymm8, %ymm12
vpxor (%rcx), %ymm9, %ymm13
vpxor 64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 3
vpxor 32(%rax), %ymm9, %ymm10
vpxor 128(%rcx), %ymm5, %ymm11
vpxor 32(%rdi), %ymm6, %ymm12
vpxor 128(%rax), %ymm7, %ymm13
vpxor -64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rdi)
# Row 4
vpxor -96(%rax), %ymm7, %ymm10
vpxor 96(%rax), %ymm8, %ymm11
vpxor -96(%rdi), %ymm9, %ymm12
vpxor (%rax), %ymm5, %ymm13
vpxor 96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 96(%rcx)
# Round 21
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rdi), %ymm6, %ymm11
vpxor 64(%rax), %ymm7, %ymm12
vpxor 128(%rax), %ymm8, %ymm13
vpxor 96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 672(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 1
vpxor 128(%rdi), %ymm8, %ymm10
vpxor -64(%rax), %ymm9, %ymm11
vpxor -96(%rcx), %ymm5, %ymm12
vpxor 128(%rcx), %ymm6, %ymm13
vpxor -96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 2
vpxor -32(%rax), %ymm6, %ymm10
vpxor -64(%rcx), %ymm7, %ymm11
vpxor (%rcx), %ymm8, %ymm12
vpxor -64(%rdi), %ymm9, %ymm13
vpxor -96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 3
vpxor -32(%rcx), %ymm9, %ymm10
vpxor 32(%rcx), %ymm5, %ymm11
vpxor -32(%rdi), %ymm6, %ymm12
vpxor 32(%rdi), %ymm7, %ymm13
vpxor (%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, (%rax)
# Row 4
vpxor 64(%rcx), %ymm7, %ymm10
vpxor (%rdi), %ymm8, %ymm11
vpxor 64(%rdi), %ymm9, %ymm12
vpxor 32(%rax), %ymm5, %ymm13
vpxor 96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rax)
# Round 22
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm1, %ymm11
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rax), %ymm6, %ymm11
vpxor (%rcx), %ymm7, %ymm12
vpxor 32(%rdi), %ymm8, %ymm13
vpxor 96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 704(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 1
vpxor 128(%rax), %ymm8, %ymm10
vpxor -96(%rdi), %ymm9, %ymm11
vpxor -32(%rax), %ymm5, %ymm12
vpxor 32(%rcx), %ymm6, %ymm13
vpxor 64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 2
vpxor 96(%rdi), %ymm6, %ymm10
vpxor -96(%rcx), %ymm7, %ymm11
vpxor -64(%rdi), %ymm8, %ymm12
vpxor (%rax), %ymm9, %ymm13
vpxor 64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 3
vpxor 96(%rcx), %ymm9, %ymm10
vpxor 128(%rdi), %ymm5, %ymm11
vpxor -64(%rcx), %ymm6, %ymm12
vpxor -32(%rdi), %ymm7, %ymm13
vpxor 32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 4
vpxor 64(%rax), %ymm7, %ymm10
vpxor 128(%rcx), %ymm8, %ymm11
vpxor -96(%rax), %ymm9, %ymm12
vpxor -32(%rcx), %ymm5, %ymm13
vpxor (%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rdi)
# Round 23
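# The iota offsets step by 32 per round and this round reads
# 736(%rdx) = 23 * 32, consistent with a fully unrolled Keccak-f[1600]
# of 24 rounds (indices 0..23); this would make it the final round of
# the permutation.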
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm4, %ymm14
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rdi), %ymm6, %ymm11
vpxor -64(%rdi), %ymm7, %ymm12
vpxor -32(%rdi), %ymm8, %ymm13
vpxor (%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
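# (iota) XOR the round-23 constant into lane (0,0). The table at %rdx
# holds each of the 24 round constants broadcast across four 64-bit
# lanes, 32 bytes apart, so round n reads n*32(%rdx); here 23*32 = 736.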
vpxor 736(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 1
vpxor 32(%rdi), %ymm8, %ymm10
vpxor 64(%rdi), %ymm9, %ymm11
vpxor 96(%rdi), %ymm5, %ymm12
vpxor 128(%rdi), %ymm6, %ymm13
vpxor -96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 2
vpxor -64(%rax), %ymm6, %ymm10
vpxor -32(%rax), %ymm7, %ymm11
vpxor (%rax), %ymm8, %ymm12
vpxor 32(%rax), %ymm9, %ymm13
vpxor 64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 64(%rax)
# Row 3
vpxor 96(%rax), %ymm9, %ymm10
vpxor 128(%rax), %ymm5, %ymm11
vpxor -96(%rcx), %ymm6, %ymm12
vpxor -64(%rcx), %ymm7, %ymm13
vpxor -32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 4
vpxor (%rcx), %ymm7, %ymm10
vpxor 32(%rcx), %ymm8, %ymm11
vpxor 64(%rcx), %ymm9, %ymm12
vpxor 96(%rcx), %ymm5, %ymm13
vpxor 128(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 128(%rcx)
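# All 24 rounds done. Lane (0,0) was kept live in %ymm15 throughout, so
# rewind %rdi to the state base and store it now; vzeroupper avoids the
# AVX-to-SSE transition penalty on return.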
subq $0x80, %rdi
vmovdqu %ymm15, (%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_sha3_128_blocksx4_seed_avx2,.-kyber_sha3_128_blocksx4_seed_avx2
#endif /* __APPLE__ */
#ifndef __APPLE__
.data
#else
.section __DATA,__data
#endif /* __APPLE__ */
#ifndef __APPLE__
.align 32
#else
.p2align 5
#endif /* __APPLE__ */
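# End-of-rate marker for the 136-byte SHA3-256 rate: 0x80 in the most
# significant byte of lane 16 (the last lane of the rate) supplies the
# closing '1' bit of the pad10*1 padding, replicated for all four
# parallel states.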
L_sha3_256_blockx4_seed_avx2_end_mark:
.quad 0x8000000000000000, 0x8000000000000000
.quad 0x8000000000000000, 0x8000000000000000
#ifndef __APPLE__
.text
.globl kyber_sha3_256_blocksx4_seed_avx2
.type kyber_sha3_256_blocksx4_seed_avx2,@function
.align 16
kyber_sha3_256_blocksx4_seed_avx2:
#else
.section __TEXT,__text
.globl _kyber_sha3_256_blocksx4_seed_avx2
.p2align 4
_kyber_sha3_256_blocksx4_seed_avx2:
#endif /* __APPLE__ */
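# Four Keccak states are processed in parallel, interleaved so that lane
# i of the 25-lane state occupies the 32 bytes at offset 32*i (8 bytes
# per state). %rdx points at the shared round-constant table;
# %rdi/%rax/%rcx are set to base+0x80/+0x180/+0x280 so every lane is
# reachable with a signed 8-bit displacement. %rsi is the 32-byte seed,
# broadcast into lanes 0..3 of all four states.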
leaq L_sha3_parallel_4_r(%rip), %rdx
movq %rdi, %rax
movq %rdi, %rcx
vpbroadcastq (%rsi), %ymm15
addq $0x80, %rdi
vpbroadcastq 8(%rsi), %ymm11
addq $0x180, %rax
vpbroadcastq 16(%rsi), %ymm12
addq $0x280, %rcx
vpbroadcastq 24(%rsi), %ymm13
vmovdqu L_sha3_256_blockx4_seed_avx2_end_mark(%rip), %ymm5
vpxor %ymm6, %ymm6, %ymm6
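# Initialise the sponge: lanes 1..3 get the broadcast seed words, lane 4
# is reloaded as prepared by the caller (presumably the per-state bytes
# and domain separator), lanes 5..15 and 17..24 are cleared, and the end
# mark lands in lane 16.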
vmovdqu %ymm11, -96(%rdi)
vmovdqu %ymm12, -64(%rdi)
vmovdqu %ymm13, -32(%rdi)
vmovdqu (%rdi), %ymm14
vmovdqu %ymm6, 32(%rdi)
vmovdqu %ymm6, 64(%rdi)
vmovdqu %ymm6, 96(%rdi)
vmovdqu %ymm6, 128(%rdi)
vmovdqu %ymm6, -96(%rax)
vmovdqu %ymm6, -64(%rax)
vmovdqu %ymm6, -32(%rax)
vmovdqu %ymm6, (%rax)
vmovdqu %ymm6, 32(%rax)
vmovdqu %ymm6, 64(%rax)
vmovdqu %ymm6, 96(%rax)
vmovdqu %ymm5, 128(%rax)
vmovdqu %ymm6, -96(%rcx)
vmovdqu %ymm6, -64(%rcx)
vmovdqu %ymm6, -32(%rcx)
vmovdqu %ymm6, (%rcx)
vmovdqu %ymm6, 32(%rcx)
vmovdqu %ymm6, 64(%rcx)
vmovdqu %ymm6, 96(%rcx)
vmovdqu %ymm6, 128(%rcx)
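# Round 0 theta shortcut: with lanes 5..15 and 17..24 still zero, the
# column parities are just the non-zero lanes themselves -- b[0] =
# lane 0, b[1] = lane 1 ^ end mark (lane 16 is also in column 1), and
# b[2..4] = lanes 2..4 -- so no loads are needed.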
vmovdqu %ymm15, %ymm10
vpxor %ymm5, %ymm11, %ymm11
# Round 0
# Calc b[0..4] (already live in ymm10..ymm14, see above)
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rdi), %ymm6, %ymm11
vpxor (%rax), %ymm7, %ymm12
vpxor -64(%rcx), %ymm8, %ymm13
vpxor 128(%rcx), %ymm9, %ymm14
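# pi gather for row 0: the sources are lanes 0, 6, 12, 18 and 24
# (state offsets 0, 192, 384, 576 and 768).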
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor (%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rcx)
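# The results are stored back to the slots they were gathered from, so
# the pi permutation is realised in place; later rounds compensate with
# matching load offsets, and since pi's lane permutation is a 24-cycle
# the layout returns to natural order after all 24 rounds.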
# Row 1
vpxor -32(%rdi), %ymm8, %ymm10
vpxor -96(%rax), %ymm9, %ymm11
vpxor -64(%rax), %ymm5, %ymm12
vpxor 128(%rax), %ymm6, %ymm13
vpxor 64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 2
vpxor -96(%rdi), %ymm6, %ymm10
vpxor 96(%rdi), %ymm7, %ymm11
vpxor 32(%rax), %ymm8, %ymm12
vpxor -32(%rcx), %ymm9, %ymm13
vpxor (%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rcx)
# Row 3
vpxor (%rdi), %ymm9, %ymm10
vpxor 32(%rdi), %ymm5, %ymm11
vpxor -32(%rax), %ymm6, %ymm12
vpxor -96(%rcx), %ymm7, %ymm13
vpxor 96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 4
vpxor -64(%rdi), %ymm7, %ymm10
vpxor 128(%rdi), %ymm8, %ymm11
vpxor 64(%rax), %ymm9, %ymm12
vpxor 96(%rax), %ymm5, %ymm13
vpxor 32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 32(%rcx)
# Round 1
# Calc b[0..4]
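# From round 1 on, the column parities need the full 25-lane XOR: five
# lanes come in via ymm0..ymm4/ymm15 from the previous chi outputs, and
# the twenty loads below pick up the rest from wherever the in-place
# stores left them.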
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm2, %ymm12
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm3, %ymm13
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm4, %ymm14
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rax), %ymm6, %ymm11
vpxor 32(%rax), %ymm7, %ymm12
vpxor -96(%rcx), %ymm8, %ymm13
vpxor 32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 32(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 1
vpxor -64(%rcx), %ymm8, %ymm10
vpxor 64(%rcx), %ymm9, %ymm11
vpxor -96(%rdi), %ymm5, %ymm12
vpxor 32(%rdi), %ymm6, %ymm13
vpxor 64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 64(%rax)
# Row 2
vpxor 64(%rdi), %ymm6, %ymm10
vpxor -64(%rax), %ymm7, %ymm11
vpxor -32(%rcx), %ymm8, %ymm12
vpxor 96(%rcx), %ymm9, %ymm13
vpxor -64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 3
vpxor 128(%rcx), %ymm9, %ymm10
vpxor -32(%rdi), %ymm5, %ymm11
vpxor 96(%rdi), %ymm6, %ymm12
vpxor -32(%rax), %ymm7, %ymm13
vpxor 96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 96(%rax)
# Row 4
vpxor (%rax), %ymm7, %ymm10
vpxor 128(%rax), %ymm8, %ymm11
vpxor (%rcx), %ymm9, %ymm12
vpxor (%rdi), %ymm5, %ymm13
vpxor 128(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 128(%rdi)
# Round 2
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rcx), %ymm6, %ymm11
vpxor -32(%rcx), %ymm7, %ymm12
vpxor -32(%rax), %ymm8, %ymm13
vpxor 128(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 64(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 1
vpxor -96(%rcx), %ymm8, %ymm10
vpxor 64(%rax), %ymm9, %ymm11
vpxor 64(%rdi), %ymm5, %ymm12
vpxor -32(%rdi), %ymm6, %ymm13
vpxor (%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 2
vpxor -96(%rax), %ymm6, %ymm10
vpxor -96(%rdi), %ymm7, %ymm11
vpxor 96(%rcx), %ymm8, %ymm12
vpxor 96(%rax), %ymm9, %ymm13
vpxor (%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rax)
# Row 3
vpxor 32(%rcx), %ymm9, %ymm10
vpxor -64(%rcx), %ymm5, %ymm11
vpxor -64(%rax), %ymm6, %ymm12
vpxor 96(%rdi), %ymm7, %ymm13
vpxor (%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 4
vpxor 32(%rax), %ymm7, %ymm10
vpxor 32(%rdi), %ymm8, %ymm11
vpxor -64(%rdi), %ymm9, %ymm12
vpxor 128(%rcx), %ymm5, %ymm13
vpxor 128(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 128(%rax)
# Round 3
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm2, %ymm12
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 64(%rax), %ymm6, %ymm11
vpxor 96(%rcx), %ymm7, %ymm12
vpxor 96(%rdi), %ymm8, %ymm13
vpxor 128(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 96(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 128(%rax)
# Row 1
vpxor -32(%rax), %ymm8, %ymm10
vpxor (%rcx), %ymm9, %ymm11
vpxor -96(%rax), %ymm5, %ymm12
vpxor -64(%rcx), %ymm6, %ymm13
vpxor -64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Row 2
vpxor 64(%rcx), %ymm6, %ymm10
vpxor 64(%rdi), %ymm7, %ymm11
vpxor 96(%rax), %ymm8, %ymm12
vpxor (%rdi), %ymm9, %ymm13
vpxor 32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 3
vpxor 128(%rdi), %ymm9, %ymm10
vpxor -96(%rcx), %ymm5, %ymm11
vpxor -96(%rdi), %ymm6, %ymm12
vpxor -64(%rax), %ymm7, %ymm13
vpxor 128(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 4
vpxor -32(%rcx), %ymm7, %ymm10
vpxor -32(%rdi), %ymm8, %ymm11
vpxor (%rax), %ymm9, %ymm12
vpxor 32(%rcx), %ymm5, %ymm13
vpxor 32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Round 4
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm3, %ymm13
vpxor 64(%rdi), %ymm1, %ymm11
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm11, %ymm11
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rcx), %ymm6, %ymm11
vpxor 96(%rax), %ymm7, %ymm12
vpxor -64(%rax), %ymm8, %ymm13
vpxor 32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 128(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 1
vpxor 96(%rdi), %ymm8, %ymm10
vpxor -64(%rdi), %ymm9, %ymm11
vpxor 64(%rcx), %ymm5, %ymm12
vpxor -96(%rcx), %ymm6, %ymm13
vpxor (%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, (%rax)
# Row 2
vpxor 64(%rax), %ymm6, %ymm10
vpxor -96(%rax), %ymm7, %ymm11
vpxor (%rdi), %ymm8, %ymm12
vpxor 128(%rcx), %ymm9, %ymm13
vpxor -32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 3
vpxor 128(%rax), %ymm9, %ymm10
vpxor -32(%rax), %ymm5, %ymm11
vpxor 64(%rdi), %ymm6, %ymm12
vpxor -96(%rdi), %ymm7, %ymm13
vpxor 32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 4
vpxor 96(%rcx), %ymm7, %ymm10
vpxor -64(%rcx), %ymm8, %ymm11
vpxor 32(%rax), %ymm9, %ymm12
vpxor 128(%rdi), %ymm5, %ymm13
vpxor -32(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Round 5
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm11, %ymm11
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rdi), %ymm6, %ymm11
vpxor (%rdi), %ymm7, %ymm12
vpxor -96(%rdi), %ymm8, %ymm13
vpxor -32(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 160(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 1
vpxor -64(%rax), %ymm8, %ymm10
vpxor (%rax), %ymm9, %ymm11
vpxor 64(%rax), %ymm5, %ymm12
vpxor -32(%rax), %ymm6, %ymm13
vpxor 32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rax)
# Row 2
vpxor (%rcx), %ymm6, %ymm10
vpxor 64(%rcx), %ymm7, %ymm11
vpxor 128(%rcx), %ymm8, %ymm12
vpxor 32(%rcx), %ymm9, %ymm13
vpxor 96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 96(%rcx)
# Row 3
vpxor 32(%rdi), %ymm9, %ymm10
vpxor 96(%rdi), %ymm5, %ymm11
vpxor -96(%rax), %ymm6, %ymm12
vpxor 64(%rdi), %ymm7, %ymm13
vpxor 128(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 128(%rdi)
# Row 4
vpxor 96(%rax), %ymm7, %ymm10
vpxor -96(%rcx), %ymm8, %ymm11
vpxor -32(%rcx), %ymm9, %ymm12
vpxor 128(%rax), %ymm5, %ymm13
vpxor -64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rcx)
# Round 6
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rax), %ymm6, %ymm11
vpxor 128(%rcx), %ymm7, %ymm12
vpxor 64(%rdi), %ymm8, %ymm13
vpxor -64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 192(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -64(%rcx)
# Row 1
vpxor -96(%rdi), %ymm8, %ymm10
vpxor 32(%rax), %ymm9, %ymm11
vpxor (%rcx), %ymm5, %ymm12
vpxor 96(%rdi), %ymm6, %ymm13
vpxor -32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 2
vpxor -64(%rdi), %ymm6, %ymm10
vpxor 64(%rax), %ymm7, %ymm11
vpxor 32(%rcx), %ymm8, %ymm12
vpxor 128(%rdi), %ymm9, %ymm13
vpxor 96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 3
vpxor -32(%rdi), %ymm9, %ymm10
vpxor -64(%rax), %ymm5, %ymm11
vpxor 64(%rcx), %ymm6, %ymm12
vpxor -96(%rax), %ymm7, %ymm13
vpxor 128(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rax)
# Row 4
vpxor (%rdi), %ymm7, %ymm10
vpxor -32(%rax), %ymm8, %ymm11
vpxor 96(%rcx), %ymm9, %ymm12
vpxor 32(%rdi), %ymm5, %ymm13
vpxor -96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Round 7
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm3, %ymm13
vpxor 96(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm1, %ymm11
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm4, %ymm14
vpxor 128(%rax), %ymm14, %ymm14
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm2, %ymm12
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rax), %ymm6, %ymm11
vpxor 32(%rcx), %ymm7, %ymm12
vpxor -96(%rax), %ymm8, %ymm13
vpxor -96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 224(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, -96(%rcx)
# Row 1
vpxor 64(%rdi), %ymm8, %ymm10
vpxor -32(%rcx), %ymm9, %ymm11
vpxor -64(%rdi), %ymm5, %ymm12
vpxor -64(%rax), %ymm6, %ymm13
vpxor 96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 2
vpxor (%rax), %ymm6, %ymm10
vpxor (%rcx), %ymm7, %ymm11
vpxor 128(%rdi), %ymm8, %ymm12
vpxor 128(%rax), %ymm9, %ymm13
vpxor (%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, (%rdi)
# Row 3
vpxor -64(%rcx), %ymm9, %ymm10
vpxor -96(%rdi), %ymm5, %ymm11
vpxor 64(%rax), %ymm6, %ymm12
vpxor 64(%rcx), %ymm7, %ymm13
vpxor 32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rdi)
# Row 4
vpxor 128(%rcx), %ymm7, %ymm10
vpxor 96(%rdi), %ymm8, %ymm11
vpxor 96(%rax), %ymm9, %ymm12
vpxor -32(%rdi), %ymm5, %ymm13
vpxor -32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, -32(%rax)
# Round 8
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm3, %ymm13
vpxor -64(%rax), %ymm13, %ymm13
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rcx), %ymm6, %ymm11
vpxor 128(%rdi), %ymm7, %ymm12
vpxor 64(%rcx), %ymm8, %ymm13
vpxor -32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 256(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 1
vpxor -96(%rax), %ymm8, %ymm10
vpxor 96(%rcx), %ymm9, %ymm11
vpxor (%rax), %ymm5, %ymm12
vpxor -96(%rdi), %ymm6, %ymm13
vpxor 96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 2
vpxor 32(%rax), %ymm6, %ymm10
vpxor -64(%rdi), %ymm7, %ymm11
vpxor 128(%rax), %ymm8, %ymm12
vpxor 32(%rdi), %ymm9, %ymm13
vpxor 128(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 128(%rcx)
# Row 3
vpxor -96(%rcx), %ymm9, %ymm10
vpxor 64(%rdi), %ymm5, %ymm11
vpxor (%rcx), %ymm6, %ymm12
vpxor 64(%rax), %ymm7, %ymm13
vpxor -32(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 4
vpxor 32(%rcx), %ymm7, %ymm10
vpxor -64(%rax), %ymm8, %ymm11
vpxor (%rdi), %ymm9, %ymm12
vpxor -64(%rcx), %ymm5, %ymm13
vpxor 96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 96(%rdi)
# Round 9
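        # Rounds 9..17 below are the same fully unrolled sequence as above:
        # Theta (column parities b[0..4], rotated terms t[0..4]), then
        # Rho/Pi/Chi one output row at a time, then Iota into %ymm15.
        # Only the load/store displacements differ from round to round,
        # because the Pi permutation is tracked in the addressing instead
        # of moving lanes back to canonical slots, and the Iota load steps
        # to the next 32-byte constant.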
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm2, %ymm12
vpxor -96(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rcx), %ymm6, %ymm11
vpxor 128(%rax), %ymm7, %ymm12
vpxor 64(%rax), %ymm8, %ymm13
vpxor 96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 288(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 1
vpxor 64(%rcx), %ymm8, %ymm10
vpxor 96(%rax), %ymm9, %ymm11
vpxor 32(%rax), %ymm5, %ymm12
vpxor 64(%rdi), %ymm6, %ymm13
vpxor (%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 2
vpxor -32(%rcx), %ymm6, %ymm10
vpxor (%rax), %ymm7, %ymm11
vpxor 32(%rdi), %ymm8, %ymm12
vpxor -32(%rdi), %ymm9, %ymm13
vpxor 32(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rcx)
# Row 3
vpxor -32(%rax), %ymm9, %ymm10
vpxor -96(%rax), %ymm5, %ymm11
vpxor -64(%rdi), %ymm6, %ymm12
vpxor (%rcx), %ymm7, %ymm13
vpxor -64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rcx)
# Row 4
vpxor 128(%rdi), %ymm7, %ymm10
vpxor -96(%rdi), %ymm8, %ymm11
vpxor 128(%rcx), %ymm9, %ymm12
vpxor -96(%rcx), %ymm5, %ymm13
vpxor -64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, -64(%rax)
# Round 10
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm4, %ymm14
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm14, %ymm14
vpxor -96(%rax), %ymm1, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm12, %ymm12
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rax), %ymm6, %ymm11
vpxor 32(%rdi), %ymm7, %ymm12
vpxor (%rcx), %ymm8, %ymm13
vpxor -64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 320(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 1
vpxor 64(%rax), %ymm8, %ymm10
vpxor (%rdi), %ymm9, %ymm11
vpxor -32(%rcx), %ymm5, %ymm12
vpxor -96(%rax), %ymm6, %ymm13
vpxor 128(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 128(%rcx)
# Row 2
vpxor 96(%rcx), %ymm6, %ymm10
vpxor 32(%rax), %ymm7, %ymm11
vpxor -32(%rdi), %ymm8, %ymm12
vpxor -64(%rcx), %ymm9, %ymm13
vpxor 128(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, 128(%rdi)
# Row 3
vpxor 96(%rdi), %ymm9, %ymm10
vpxor 64(%rcx), %ymm5, %ymm11
vpxor (%rax), %ymm6, %ymm12
vpxor -64(%rdi), %ymm7, %ymm13
vpxor -96(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 4
vpxor 128(%rax), %ymm7, %ymm10
vpxor 64(%rdi), %ymm8, %ymm11
vpxor 32(%rcx), %ymm9, %ymm12
vpxor -32(%rax), %ymm5, %ymm13
vpxor -96(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, -96(%rdi)
# Round 11
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm4, %ymm14
vpxor -96(%rax), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm12, %ymm12
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm13, %ymm13
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm14, %ymm14
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor (%rdi), %ymm6, %ymm11
vpxor -32(%rdi), %ymm7, %ymm12
vpxor -64(%rdi), %ymm8, %ymm13
vpxor -96(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 352(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rdi)
# Row 1
vpxor (%rcx), %ymm8, %ymm10
vpxor 128(%rcx), %ymm9, %ymm11
vpxor 96(%rcx), %ymm5, %ymm12
vpxor 64(%rcx), %ymm6, %ymm13
vpxor 32(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, 32(%rcx)
# Row 2
vpxor 96(%rax), %ymm6, %ymm10
vpxor -32(%rcx), %ymm7, %ymm11
vpxor -64(%rcx), %ymm8, %ymm12
vpxor -96(%rcx), %ymm9, %ymm13
vpxor 128(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 3
vpxor -64(%rax), %ymm9, %ymm10
vpxor 64(%rax), %ymm5, %ymm11
vpxor 32(%rax), %ymm6, %ymm12
vpxor (%rax), %ymm7, %ymm13
vpxor -32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rax)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rax)
# Row 4
vpxor 32(%rdi), %ymm7, %ymm10
vpxor -96(%rax), %ymm8, %ymm11
vpxor 128(%rdi), %ymm9, %ymm12
vpxor 96(%rdi), %ymm5, %ymm13
vpxor 64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, -96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, 64(%rdi)
# Round 12
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor -64(%rax), %ymm10, %ymm10
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm12, %ymm12
vpxor 64(%rax), %ymm11, %ymm11
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm13, %ymm13
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm14, %ymm14
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rcx), %ymm6, %ymm11
vpxor -64(%rcx), %ymm7, %ymm12
vpxor (%rax), %ymm8, %ymm13
vpxor 64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 384(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 1
vpxor -64(%rdi), %ymm8, %ymm10
vpxor 32(%rcx), %ymm9, %ymm11
vpxor 96(%rax), %ymm5, %ymm12
vpxor 64(%rax), %ymm6, %ymm13
vpxor 128(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, 128(%rdi)
# Row 2
vpxor (%rdi), %ymm6, %ymm10
vpxor 96(%rcx), %ymm7, %ymm11
vpxor -96(%rcx), %ymm8, %ymm12
vpxor -32(%rax), %ymm9, %ymm13
vpxor 32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -32(%rax)
vmovdqu %ymm4, 32(%rdi)
# Row 3
vpxor -96(%rdi), %ymm9, %ymm10
vpxor (%rcx), %ymm5, %ymm11
vpxor -32(%rcx), %ymm6, %ymm12
vpxor 32(%rax), %ymm7, %ymm13
vpxor 96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 4
vpxor -32(%rdi), %ymm7, %ymm10
vpxor 64(%rcx), %ymm8, %ymm11
vpxor 128(%rax), %ymm9, %ymm12
vpxor -64(%rax), %ymm5, %ymm13
vpxor -96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -96(%rax)
# Round 13
# Calc b[0..4]
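        # Same Theta parity computation; the generator merely schedules the
        # XOR chains differently here (one b[x] chain at a time instead of
        # interleaved); the five column parities are unchanged.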
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm10, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm4, %ymm14
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm14, %ymm14
vpxor -32(%rax), %ymm3, %ymm13
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm2, %ymm12
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm1, %ymm11
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rcx), %ymm6, %ymm11
vpxor -96(%rcx), %ymm7, %ymm12
vpxor 32(%rax), %ymm8, %ymm13
vpxor -96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 416(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -96(%rax)
# Row 1
vpxor (%rax), %ymm8, %ymm10
vpxor 128(%rdi), %ymm9, %ymm11
vpxor (%rdi), %ymm5, %ymm12
vpxor (%rcx), %ymm6, %ymm13
vpxor 128(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 128(%rax)
# Row 2
vpxor 128(%rcx), %ymm6, %ymm10
vpxor 96(%rax), %ymm7, %ymm11
vpxor -32(%rax), %ymm8, %ymm12
vpxor 96(%rdi), %ymm9, %ymm13
vpxor -32(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -32(%rdi)
# Row 3
vpxor 64(%rdi), %ymm9, %ymm10
vpxor -64(%rdi), %ymm5, %ymm11
vpxor 96(%rcx), %ymm6, %ymm12
vpxor -32(%rcx), %ymm7, %ymm13
vpxor -64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, 96(%rcx)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 4
vpxor -64(%rcx), %ymm7, %ymm10
vpxor 64(%rax), %ymm8, %ymm11
vpxor 32(%rdi), %ymm9, %ymm12
vpxor -96(%rdi), %ymm5, %ymm13
vpxor 64(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 64(%rax)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, 64(%rcx)
# Round 14
# Calc b[0..4]
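        # Note the parity seeds: %ymm15 (lane s[0]) and %ymm0..%ymm4 (the
        # previous round's Row 4 outputs) are still live in registers, so
        # six of the 25 lane reads are avoided each round.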
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm1, %ymm11
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm10, %ymm10
vpxor 96(%rdi), %ymm3, %ymm13
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm11, %ymm11
vpxor 128(%rax), %ymm14, %ymm14
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rdi), %ymm6, %ymm11
vpxor -32(%rax), %ymm7, %ymm12
vpxor -32(%rcx), %ymm8, %ymm13
vpxor 64(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 448(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 1
vpxor 32(%rax), %ymm8, %ymm10
vpxor 128(%rax), %ymm9, %ymm11
vpxor 128(%rcx), %ymm5, %ymm12
vpxor -64(%rdi), %ymm6, %ymm13
vpxor 32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, 32(%rdi)
# Row 2
vpxor 32(%rcx), %ymm6, %ymm10
vpxor (%rdi), %ymm7, %ymm11
vpxor 96(%rdi), %ymm8, %ymm12
vpxor -64(%rax), %ymm9, %ymm13
vpxor -64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, -64(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 3
vpxor -96(%rax), %ymm9, %ymm10
vpxor (%rax), %ymm5, %ymm11
vpxor 96(%rax), %ymm6, %ymm12
vpxor 96(%rcx), %ymm7, %ymm13
vpxor -96(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, 96(%rax)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 4
vpxor -96(%rcx), %ymm7, %ymm10
vpxor (%rcx), %ymm8, %ymm11
vpxor -32(%rdi), %ymm9, %ymm12
vpxor 64(%rdi), %ymm5, %ymm13
vpxor 64(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, (%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, 64(%rax)
# Round 15
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm2, %ymm12
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm10, %ymm10
vpxor -64(%rax), %ymm13, %ymm13
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm11, %ymm11
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 128(%rax), %ymm6, %ymm11
vpxor 96(%rdi), %ymm7, %ymm12
vpxor 96(%rcx), %ymm8, %ymm13
vpxor 64(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 480(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 1
vpxor -32(%rcx), %ymm8, %ymm10
vpxor 32(%rdi), %ymm9, %ymm11
vpxor 32(%rcx), %ymm5, %ymm12
vpxor (%rax), %ymm6, %ymm13
vpxor -32(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, -32(%rdi)
# Row 2
vpxor 128(%rdi), %ymm6, %ymm10
vpxor 128(%rcx), %ymm7, %ymm11
vpxor -64(%rax), %ymm8, %ymm12
vpxor -96(%rdi), %ymm9, %ymm13
vpxor -96(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -96(%rdi)
vmovdqu %ymm4, -96(%rcx)
# Row 3
vpxor 64(%rcx), %ymm9, %ymm10
vpxor 32(%rax), %ymm5, %ymm11
vpxor (%rdi), %ymm6, %ymm12
vpxor 96(%rax), %ymm7, %ymm13
vpxor 64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, (%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 64(%rdi)
# Row 4
vpxor -32(%rax), %ymm7, %ymm10
vpxor -64(%rdi), %ymm8, %ymm11
vpxor -64(%rcx), %ymm9, %ymm12
vpxor -96(%rax), %ymm5, %ymm13
vpxor (%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, (%rcx)
# Round 16
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm4, %ymm14
vpxor (%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm1, %ymm11
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -64(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm11, %ymm11
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm10, %ymm10
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 32(%rdi), %ymm6, %ymm11
vpxor -64(%rax), %ymm7, %ymm12
vpxor 96(%rax), %ymm8, %ymm13
vpxor (%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 512(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, (%rcx)
# Row 1
vpxor 96(%rcx), %ymm8, %ymm10
vpxor -32(%rdi), %ymm9, %ymm11
vpxor 128(%rdi), %ymm5, %ymm12
vpxor 32(%rax), %ymm6, %ymm13
vpxor -64(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, -64(%rcx)
# Row 2
vpxor 128(%rax), %ymm6, %ymm10
vpxor 32(%rcx), %ymm7, %ymm11
vpxor -96(%rdi), %ymm8, %ymm12
vpxor 64(%rdi), %ymm9, %ymm13
vpxor -32(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, 64(%rdi)
vmovdqu %ymm4, -32(%rax)
# Row 3
vpxor 64(%rax), %ymm9, %ymm10
vpxor -32(%rcx), %ymm5, %ymm11
vpxor 128(%rcx), %ymm6, %ymm12
vpxor (%rdi), %ymm7, %ymm13
vpxor -96(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 128(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 4
vpxor 96(%rdi), %ymm7, %ymm10
vpxor (%rax), %ymm8, %ymm11
vpxor -96(%rcx), %ymm9, %ymm12
vpxor 64(%rcx), %ymm5, %ymm13
vpxor -64(%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, (%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rdi)
# Round 17
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 64(%rdi), %ymm13, %ymm13
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm4, %ymm14
vpxor -64(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm14, %ymm14
vpxor 32(%rax), %ymm13, %ymm13
vpxor 64(%rax), %ymm10, %ymm10
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm10, %ymm10
vpxor -64(%rcx), %ymm14, %ymm14
vpxor -32(%rcx), %ymm11, %ymm11
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm10, %ymm10
vpxor 128(%rcx), %ymm12, %ymm12
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rdi), %ymm6, %ymm11
vpxor -96(%rdi), %ymm7, %ymm12
vpxor (%rdi), %ymm8, %ymm13
vpxor -64(%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 544(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rdi)
# Row 1
vpxor 96(%rax), %ymm8, %ymm10
vpxor -64(%rcx), %ymm9, %ymm11
vpxor 128(%rax), %ymm5, %ymm12
vpxor -32(%rcx), %ymm6, %ymm13
vpxor -96(%rcx), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, -96(%rcx)
# Row 2
vpxor 32(%rdi), %ymm6, %ymm10
vpxor 128(%rdi), %ymm7, %ymm11
vpxor 64(%rdi), %ymm8, %ymm12
vpxor -96(%rax), %ymm9, %ymm13
vpxor 96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, -96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 3
vpxor (%rcx), %ymm9, %ymm10
vpxor 96(%rcx), %ymm5, %ymm11
vpxor 32(%rcx), %ymm6, %ymm12
vpxor 128(%rcx), %ymm7, %ymm13
vpxor 64(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, 32(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, 64(%rcx)
# Row 4
vpxor -64(%rax), %ymm7, %ymm10
vpxor 32(%rax), %ymm8, %ymm11
vpxor -32(%rax), %ymm9, %ymm12
vpxor 64(%rax), %ymm5, %ymm13
vpxor (%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, (%rax)
# Round 18
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm2, %ymm12
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm10, %ymm10
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -96(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm10, %ymm10
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm14, %ymm14
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm13, %ymm13
vpxor (%rcx), %ymm10, %ymm10
vpxor 32(%rcx), %ymm12, %ymm12
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm11, %ymm11
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rcx), %ymm6, %ymm11
vpxor 64(%rdi), %ymm7, %ymm12
vpxor 128(%rcx), %ymm8, %ymm13
vpxor (%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 576(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, (%rax)
# Row 1
vpxor (%rdi), %ymm8, %ymm10
vpxor -96(%rcx), %ymm9, %ymm11
vpxor 32(%rdi), %ymm5, %ymm12
vpxor 96(%rcx), %ymm6, %ymm13
vpxor -32(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, -32(%rax)
# Row 2
vpxor -32(%rdi), %ymm6, %ymm10
vpxor 128(%rax), %ymm7, %ymm11
vpxor -96(%rax), %ymm8, %ymm12
vpxor 64(%rcx), %ymm9, %ymm13
vpxor -64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rdi)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 64(%rcx)
vmovdqu %ymm4, -64(%rax)
# Row 3
vpxor -64(%rdi), %ymm9, %ymm10
vpxor 96(%rax), %ymm5, %ymm11
vpxor 128(%rdi), %ymm6, %ymm12
vpxor 32(%rcx), %ymm7, %ymm13
vpxor 64(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rdi)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, 128(%rdi)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rax)
# Row 4
vpxor -96(%rdi), %ymm7, %ymm10
vpxor -32(%rcx), %ymm8, %ymm11
vpxor 96(%rdi), %ymm9, %ymm12
vpxor (%rcx), %ymm5, %ymm13
vpxor 32(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rdi)
vmovdqu %ymm1, -32(%rcx)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 32(%rax)
# Round 19
# Calc b[0..4]
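        # Same column parities as in the previous round; only the source
        # offsets differ, because pi re-homes the lanes in memory each round.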
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm10, %ymm10
vpxor -32(%rdi), %ymm10, %ymm10
vpxor (%rdi), %ymm10, %ymm10
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm12, %ymm12
vpxor 128(%rdi), %ymm12, %ymm12
vpxor -96(%rax), %ymm12, %ymm12
vpxor -64(%rax), %ymm4, %ymm14
vpxor -32(%rax), %ymm14, %ymm14
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm1, %ymm11
vpxor 128(%rax), %ymm11, %ymm11
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm11, %ymm11
vpxor 32(%rcx), %ymm3, %ymm13
vpxor 64(%rcx), %ymm13, %ymm13
vpxor 96(%rcx), %ymm13, %ymm13
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rcx), %ymm6, %ymm11
vpxor -96(%rax), %ymm7, %ymm12
vpxor 32(%rcx), %ymm8, %ymm13
vpxor 32(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 608(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 32(%rax)
# Row 1
vpxor 128(%rcx), %ymm8, %ymm10
vpxor -32(%rax), %ymm9, %ymm11
vpxor -32(%rdi), %ymm5, %ymm12
vpxor 96(%rax), %ymm6, %ymm13
vpxor 96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rcx)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm4, 96(%rdi)
# Row 2
vpxor -64(%rcx), %ymm6, %ymm10
vpxor 32(%rdi), %ymm7, %ymm11
vpxor 64(%rcx), %ymm8, %ymm12
vpxor 64(%rax), %ymm9, %ymm13
vpxor -96(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rcx)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 64(%rax)
vmovdqu %ymm4, -96(%rdi)
# Row 3
vpxor (%rax), %ymm9, %ymm10
vpxor (%rdi), %ymm5, %ymm11
vpxor 128(%rax), %ymm6, %ymm12
vpxor 128(%rdi), %ymm7, %ymm13
vpxor (%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rax)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 128(%rax)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, (%rcx)
# Row 4
vpxor 64(%rdi), %ymm7, %ymm10
vpxor 96(%rcx), %ymm8, %ymm11
vpxor -64(%rax), %ymm9, %ymm12
vpxor -64(%rdi), %ymm5, %ymm13
vpxor -32(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rdi)
vmovdqu %ymm1, 96(%rcx)
vmovdqu %ymm2, -64(%rax)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Round 20
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm2, %ymm12
vpxor (%rdi), %ymm1, %ymm11
vpxor 32(%rdi), %ymm11, %ymm11
vpxor 96(%rdi), %ymm14, %ymm14
vpxor 128(%rdi), %ymm3, %ymm13
vpxor -96(%rax), %ymm12, %ymm12
vpxor -32(%rax), %ymm11, %ymm11
vpxor (%rax), %ymm10, %ymm10
vpxor 32(%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm13, %ymm13
vpxor 96(%rax), %ymm13, %ymm13
vpxor 128(%rax), %ymm12, %ymm12
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm14, %ymm14
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -32(%rax), %ymm6, %ymm11
vpxor 64(%rcx), %ymm7, %ymm12
vpxor 128(%rdi), %ymm8, %ymm13
vpxor -32(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 640(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -32(%rcx)
# Row 1
vpxor 32(%rcx), %ymm8, %ymm10
vpxor 96(%rdi), %ymm9, %ymm11
vpxor -64(%rcx), %ymm5, %ymm12
vpxor (%rdi), %ymm6, %ymm13
vpxor -64(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rcx)
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, (%rdi)
vmovdqu %ymm4, -64(%rax)
# Row 2
vpxor -96(%rcx), %ymm6, %ymm10
vpxor -32(%rdi), %ymm7, %ymm11
vpxor 64(%rax), %ymm8, %ymm12
vpxor (%rcx), %ymm9, %ymm13
vpxor 64(%rdi), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rcx)
vmovdqu %ymm1, -32(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, (%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 3
vpxor 32(%rax), %ymm9, %ymm10
vpxor 128(%rcx), %ymm5, %ymm11
vpxor 32(%rdi), %ymm6, %ymm12
vpxor 128(%rax), %ymm7, %ymm13
vpxor -64(%rdi), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, 32(%rdi)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, -64(%rdi)
# Row 4
vpxor -96(%rax), %ymm7, %ymm10
vpxor 96(%rax), %ymm8, %ymm11
vpxor -96(%rdi), %ymm9, %ymm12
vpxor (%rax), %ymm5, %ymm13
vpxor 96(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -96(%rax)
vmovdqu %ymm1, 96(%rax)
vmovdqu %ymm2, -96(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 96(%rcx)
# Round 21
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -64(%rdi), %ymm4, %ymm14
vpxor -32(%rdi), %ymm1, %ymm11
vpxor (%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm2, %ymm12
vpxor 64(%rdi), %ymm14, %ymm14
vpxor 96(%rdi), %ymm11, %ymm11
vpxor 128(%rdi), %ymm13, %ymm13
vpxor -64(%rax), %ymm14, %ymm14
vpxor -32(%rax), %ymm11, %ymm11
vpxor 32(%rax), %ymm10, %ymm10
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm10, %ymm10
vpxor -64(%rcx), %ymm12, %ymm12
vpxor -32(%rcx), %ymm14, %ymm14
vpxor (%rcx), %ymm13, %ymm13
vpxor 32(%rcx), %ymm10, %ymm10
vpxor 64(%rcx), %ymm12, %ymm12
vpxor 128(%rcx), %ymm11, %ymm11
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor 96(%rdi), %ymm6, %ymm11
vpxor 64(%rax), %ymm7, %ymm12
vpxor 128(%rax), %ymm8, %ymm13
vpxor 96(%rcx), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 672(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, 96(%rdi)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 128(%rax)
vmovdqu %ymm4, 96(%rcx)
# Row 1
vpxor 128(%rdi), %ymm8, %ymm10
vpxor -64(%rax), %ymm9, %ymm11
vpxor -96(%rcx), %ymm5, %ymm12
vpxor 128(%rcx), %ymm6, %ymm13
vpxor -96(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rdi)
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, 128(%rcx)
vmovdqu %ymm4, -96(%rdi)
# Row 2
vpxor -32(%rax), %ymm6, %ymm10
vpxor -64(%rcx), %ymm7, %ymm11
vpxor (%rcx), %ymm8, %ymm12
vpxor -64(%rdi), %ymm9, %ymm13
vpxor -96(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rax)
vmovdqu %ymm1, -64(%rcx)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, -64(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 3
vpxor -32(%rcx), %ymm9, %ymm10
vpxor 32(%rcx), %ymm5, %ymm11
vpxor -32(%rdi), %ymm6, %ymm12
vpxor 32(%rdi), %ymm7, %ymm13
vpxor (%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -32(%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, -32(%rdi)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, (%rax)
# Row 4
vpxor 64(%rcx), %ymm7, %ymm10
vpxor (%rdi), %ymm8, %ymm11
vpxor 64(%rdi), %ymm9, %ymm12
vpxor 32(%rax), %ymm5, %ymm13
vpxor 96(%rax), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rcx)
vmovdqu %ymm1, (%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 96(%rax)
# Round 22
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm4, %ymm14
vpxor -64(%rdi), %ymm3, %ymm13
vpxor -32(%rdi), %ymm2, %ymm12
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 96(%rdi), %ymm1, %ymm11
vpxor 128(%rdi), %ymm10, %ymm10
vpxor -96(%rax), %ymm14, %ymm14
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm10, %ymm10
vpxor (%rax), %ymm14, %ymm14
vpxor 64(%rax), %ymm12, %ymm12
vpxor 128(%rax), %ymm13, %ymm13
vpxor -96(%rcx), %ymm12, %ymm12
vpxor -64(%rcx), %ymm11, %ymm11
vpxor -32(%rcx), %ymm10, %ymm10
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm11, %ymm11
vpxor 96(%rcx), %ymm14, %ymm14
vpxor 128(%rcx), %ymm13, %ymm13
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -64(%rax), %ymm6, %ymm11
vpxor (%rcx), %ymm7, %ymm12
vpxor 32(%rdi), %ymm8, %ymm13
vpxor 96(%rax), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 704(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -64(%rax)
vmovdqu %ymm2, (%rcx)
vmovdqu %ymm3, 32(%rdi)
vmovdqu %ymm4, 96(%rax)
# Row 1
vpxor 128(%rax), %ymm8, %ymm10
vpxor -96(%rdi), %ymm9, %ymm11
vpxor -32(%rax), %ymm5, %ymm12
vpxor 32(%rcx), %ymm6, %ymm13
vpxor 64(%rdi), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 128(%rax)
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -32(%rax)
vmovdqu %ymm3, 32(%rcx)
vmovdqu %ymm4, 64(%rdi)
# Row 2
vpxor 96(%rdi), %ymm6, %ymm10
vpxor -96(%rcx), %ymm7, %ymm11
vpxor -64(%rdi), %ymm8, %ymm12
vpxor (%rax), %ymm9, %ymm13
vpxor 64(%rcx), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rdi)
vmovdqu %ymm1, -96(%rcx)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, (%rax)
vmovdqu %ymm4, 64(%rcx)
# Row 3
vpxor 96(%rcx), %ymm9, %ymm10
vpxor 128(%rdi), %ymm5, %ymm11
vpxor -64(%rcx), %ymm6, %ymm12
vpxor -32(%rdi), %ymm7, %ymm13
vpxor 32(%rax), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rcx)
vmovdqu %ymm1, 128(%rdi)
vmovdqu %ymm2, -64(%rcx)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, 32(%rax)
# Row 4
vpxor 64(%rax), %ymm7, %ymm10
vpxor 128(%rcx), %ymm8, %ymm11
vpxor -96(%rax), %ymm9, %ymm12
vpxor -32(%rcx), %ymm5, %ymm13
vpxor (%rdi), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 64(%rax)
vmovdqu %ymm1, 128(%rcx)
vmovdqu %ymm2, -96(%rax)
vmovdqu %ymm3, -32(%rcx)
vmovdqu %ymm4, (%rdi)
# Round 23
# Calc b[0..4]
vpxor %ymm15, %ymm0, %ymm10
vpxor -96(%rdi), %ymm1, %ymm11
vpxor -64(%rdi), %ymm2, %ymm12
vpxor -32(%rdi), %ymm3, %ymm13
vpxor 32(%rdi), %ymm13, %ymm13
vpxor 64(%rdi), %ymm4, %ymm14
vpxor 96(%rdi), %ymm10, %ymm10
vpxor 128(%rdi), %ymm11, %ymm11
vpxor -64(%rax), %ymm11, %ymm11
vpxor -32(%rax), %ymm12, %ymm12
vpxor (%rax), %ymm13, %ymm13
vpxor 32(%rax), %ymm14, %ymm14
vpxor 96(%rax), %ymm14, %ymm14
vpxor 128(%rax), %ymm10, %ymm10
vpxor -96(%rcx), %ymm11, %ymm11
vpxor -64(%rcx), %ymm12, %ymm12
vpxor (%rcx), %ymm12, %ymm12
vpxor 32(%rcx), %ymm13, %ymm13
vpxor 64(%rcx), %ymm14, %ymm14
vpxor 96(%rcx), %ymm10, %ymm10
# Calc t[0..4]
vpsrlq $63, %ymm11, %ymm0
vpsrlq $63, %ymm12, %ymm1
vpsrlq $63, %ymm13, %ymm2
vpsrlq $63, %ymm14, %ymm3
vpsrlq $63, %ymm10, %ymm4
vpaddq %ymm11, %ymm11, %ymm5
vpaddq %ymm12, %ymm12, %ymm6
vpaddq %ymm13, %ymm13, %ymm7
vpaddq %ymm14, %ymm14, %ymm8
vpaddq %ymm10, %ymm10, %ymm9
vpor %ymm0, %ymm5, %ymm5
vpor %ymm1, %ymm6, %ymm6
vpor %ymm2, %ymm7, %ymm7
vpor %ymm3, %ymm8, %ymm8
vpor %ymm4, %ymm9, %ymm9
vpxor %ymm14, %ymm5, %ymm5
vpxor %ymm10, %ymm6, %ymm6
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm8, %ymm8
vpxor %ymm13, %ymm9, %ymm9
# Row Mix
# Row 0
vpxor %ymm15, %ymm5, %ymm10
vpxor -96(%rdi), %ymm6, %ymm11
vpxor -64(%rdi), %ymm7, %ymm12
vpxor -32(%rdi), %ymm8, %ymm13
vpxor (%rdi), %ymm9, %ymm14
vpsrlq $20, %ymm11, %ymm0
vpsrlq $21, %ymm12, %ymm1
vpsrlq $43, %ymm13, %ymm2
vpsrlq $50, %ymm14, %ymm3
vpsllq $44, %ymm11, %ymm11
vpsllq $43, %ymm12, %ymm12
vpsllq $21, %ymm13, %ymm13
vpsllq $14, %ymm14, %ymm14
vpor %ymm0, %ymm11, %ymm11
vpor %ymm1, %ymm12, %ymm12
vpor %ymm2, %ymm13, %ymm13
vpor %ymm3, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
# XOR in constant
vpxor 736(%rdx), %ymm15, %ymm15
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm1, -96(%rdi)
vmovdqu %ymm2, -64(%rdi)
vmovdqu %ymm3, -32(%rdi)
vmovdqu %ymm4, (%rdi)
# Row 1
vpxor 32(%rdi), %ymm8, %ymm10
vpxor 64(%rdi), %ymm9, %ymm11
vpxor 96(%rdi), %ymm5, %ymm12
vpxor 128(%rdi), %ymm6, %ymm13
vpxor -96(%rax), %ymm7, %ymm14
vpsrlq $36, %ymm10, %ymm0
vpsrlq $44, %ymm11, %ymm1
vpsrlq $61, %ymm12, %ymm2
vpsrlq $19, %ymm13, %ymm3
vpsrlq $3, %ymm14, %ymm4
vpsllq $28, %ymm10, %ymm10
vpsllq $20, %ymm11, %ymm11
vpsllq $3, %ymm12, %ymm12
vpsllq $45, %ymm13, %ymm13
vpsllq $61, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 32(%rdi)
vmovdqu %ymm1, 64(%rdi)
vmovdqu %ymm2, 96(%rdi)
vmovdqu %ymm3, 128(%rdi)
vmovdqu %ymm4, -96(%rax)
# Row 2
vpxor -64(%rax), %ymm6, %ymm10
vpxor -32(%rax), %ymm7, %ymm11
vpxor (%rax), %ymm8, %ymm12
vpxor 32(%rax), %ymm9, %ymm13
vpxor 64(%rax), %ymm5, %ymm14
vpsrlq $63, %ymm10, %ymm0
vpsrlq $58, %ymm11, %ymm1
vpsrlq $39, %ymm12, %ymm2
vpsrlq $56, %ymm13, %ymm3
vpsrlq $46, %ymm14, %ymm4
vpaddq %ymm10, %ymm10, %ymm10
vpsllq $6, %ymm11, %ymm11
vpsllq $25, %ymm12, %ymm12
vpsllq $8, %ymm13, %ymm13
vpsllq $18, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, -64(%rax)
vmovdqu %ymm1, -32(%rax)
vmovdqu %ymm2, (%rax)
vmovdqu %ymm3, 32(%rax)
vmovdqu %ymm4, 64(%rax)
# Row 3
vpxor 96(%rax), %ymm9, %ymm10
vpxor 128(%rax), %ymm5, %ymm11
vpxor -96(%rcx), %ymm6, %ymm12
vpxor -64(%rcx), %ymm7, %ymm13
vpxor -32(%rcx), %ymm8, %ymm14
vpsrlq $37, %ymm10, %ymm0
vpsrlq $28, %ymm11, %ymm1
vpsrlq $54, %ymm12, %ymm2
vpsrlq $49, %ymm13, %ymm3
vpsrlq $8, %ymm14, %ymm4
vpsllq $27, %ymm10, %ymm10
vpsllq $36, %ymm11, %ymm11
vpsllq $10, %ymm12, %ymm12
vpsllq $15, %ymm13, %ymm13
vpsllq $56, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, 96(%rax)
vmovdqu %ymm1, 128(%rax)
vmovdqu %ymm2, -96(%rcx)
vmovdqu %ymm3, -64(%rcx)
vmovdqu %ymm4, -32(%rcx)
# Row 4
vpxor (%rcx), %ymm7, %ymm10
vpxor 32(%rcx), %ymm8, %ymm11
vpxor 64(%rcx), %ymm9, %ymm12
vpxor 96(%rcx), %ymm5, %ymm13
vpxor 128(%rcx), %ymm6, %ymm14
vpsrlq $2, %ymm10, %ymm0
vpsrlq $9, %ymm11, %ymm1
vpsrlq $25, %ymm12, %ymm2
vpsrlq $23, %ymm13, %ymm3
vpsrlq $62, %ymm14, %ymm4
vpsllq $62, %ymm10, %ymm10
vpsllq $55, %ymm11, %ymm11
vpsllq $39, %ymm12, %ymm12
vpsllq $41, %ymm13, %ymm13
vpsllq $2, %ymm14, %ymm14
vpor %ymm0, %ymm10, %ymm10
vpor %ymm1, %ymm11, %ymm11
vpor %ymm2, %ymm12, %ymm12
vpor %ymm3, %ymm13, %ymm13
vpor %ymm4, %ymm14, %ymm14
vpandn %ymm12, %ymm11, %ymm0
vpandn %ymm13, %ymm12, %ymm1
vpandn %ymm14, %ymm13, %ymm2
vpandn %ymm10, %ymm14, %ymm3
vpandn %ymm11, %ymm10, %ymm4
vpxor %ymm10, %ymm0, %ymm0
vpxor %ymm11, %ymm1, %ymm1
vpxor %ymm12, %ymm2, %ymm2
vpxor %ymm13, %ymm3, %ymm3
vpxor %ymm14, %ymm4, %ymm4
vmovdqu %ymm0, (%rcx)
vmovdqu %ymm1, 32(%rcx)
vmovdqu %ymm2, 64(%rcx)
vmovdqu %ymm3, 96(%rcx)
vmovdqu %ymm4, 128(%rcx)
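        # End of the 24 Keccak-f[1600] rounds: rewind rdi to the state base
        # (it was advanced 0x80 into the interleaved state) and spill lane
        # A[0][0], which lived in ymm15 throughout. vzeroupper avoids AVX/SSE
        # transition penalties; "repz retq" is the two-byte return that
        # sidesteps a branch-predictor penalty on older AMD cores.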
subq $0x80, %rdi
vmovdqu %ymm15, (%rdi)
vzeroupper
repz retq
#ifndef __APPLE__
.size kyber_sha3_256_blocksx4_seed_avx2,.-kyber_sha3_256_blocksx4_seed_avx2
#endif /* __APPLE__ */
#endif /* HAVE_INTEL_AVX2 */
#endif /* WOLFSSL_WC_KYBER */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_WindowMode/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC (Touch Sensing Controller)
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
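; Two-region memory model: return heap base in R0, initial stack
; pointer in R1, heap limit in R2 and stack limit in R3.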
LDR R0, =Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, =(Heap_Mem + Heap_Size)
LDR R3, =Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_WindowMode/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, =_ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aenu1/aps3e
| 428,882
|
app/src/main/cpp/rpcs3/3rdparty/wolfssl/wolfssl/wolfcrypt/src/aes_gcm_x86_asm.S
|
/* aes_gcm_x86_asm
*
* Copyright (C) 2006-2023 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifdef WOLFSSL_USER_SETTINGS
#include "wolfssl/wolfcrypt/settings.h"
#endif
#ifndef HAVE_INTEL_AVX1
#define HAVE_INTEL_AVX1
#endif /* HAVE_INTEL_AVX1 */
#ifndef NO_AVX2_SUPPORT
#define HAVE_INTEL_AVX2
#endif /* NO_AVX2_SUPPORT */
.type data, @object
L_aes_gcm_one:
.long 0x0,0x0,0x1,0x0
.type data, @object
L_aes_gcm_two:
.long 0x0,0x0,0x2,0x0
.type data, @object
L_aes_gcm_three:
.long 0x0,0x0,0x3,0x0
.type data, @object
L_aes_gcm_four:
.long 0x0,0x0,0x4,0x0
.type data, @object
L_aes_gcm_bswap_epi64:
.long 0x4050607,0x10203,0xc0d0e0f,0x8090a0b
.type data, @object
L_aes_gcm_bswap_mask:
.long 0xc0d0e0f,0x8090a0b,0x4050607,0x10203
.type data, @object
L_aes_gcm_mod2_128:
.long 0x1,0x0,0x0,0xc2000000
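# Constant notes: the *_one..*_four values are counter increments for the
# big-endian counter word; *_bswap_epi64 byte-swaps each 64-bit half;
# *_bswap_mask reverses all 16 bytes (GHASH is big-endian); *_mod2_128
# encodes the GHASH reduction polynomial x^128 + x^7 + x^2 + x + 1 in
# reflected form.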
.type data, @object
L_aes_gcm_avx1_one:
.long 0x0,0x0,0x1,0x0
.type data, @object
L_aes_gcm_avx1_two:
.long 0x0,0x0,0x2,0x0
.type data, @object
L_aes_gcm_avx1_three:
.long 0x0,0x0,0x3,0x0
.type data, @object
L_aes_gcm_avx1_four:
.long 0x0,0x0,0x4,0x0
.type data, @object
L_aes_gcm_avx1_bswap_epi64:
.long 0x4050607,0x10203,0xc0d0e0f,0x8090a0b
.type data, @object
L_aes_gcm_avx1_bswap_mask:
.long 0xc0d0e0f,0x8090a0b,0x4050607,0x10203
.type data, @object
L_aes_gcm_avx1_mod2_128:
.long 0x1,0x0,0x0,0xc2000000
.type data, @object
L_aes_gcm_avx2_one:
.long 0x0,0x0,0x1,0x0
.type data, @object
L_aes_gcm_avx2_two:
.long 0x0,0x0,0x2,0x0
.type data, @object
L_aes_gcm_avx2_three:
.long 0x0,0x0,0x3,0x0
.type data, @object
L_aes_gcm_avx2_four:
.long 0x0,0x0,0x4,0x0
.type data, @object
L_avx2_aes_gcm_bswap_one:
.long 0x0,0x0,0x0,0x1000000
.type data, @object
L_aes_gcm_avx2_bswap_epi64:
.long 0x4050607,0x10203,0xc0d0e0f,0x8090a0b
.type data, @object
L_aes_gcm_avx2_bswap_mask:
.long 0xc0d0e0f,0x8090a0b,0x4050607,0x10203
.type data, @object
L_aes_gcm_avx2_mod2_128:
.long 0x1,0x0,0x0,0xc2000000
.text
.globl AES_GCM_encrypt_aesni
.type AES_GCM_encrypt_aesni,@function
.align 16
AES_GCM_encrypt_aesni:
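        # 32-bit cdecl entry; judging by the loads below, 144(%esp) is the
        # IV pointer (-> esi), 160(%esp) the IV length (-> edx), 168(%esp)
        # the AES key schedule (-> ebp) and 172(%esp) the number of AES
        # rounds.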
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x70, %esp
movl 144(%esp), %esi
movl 168(%esp), %ebp
movl 160(%esp), %edx
pxor %xmm0, %xmm0
pxor %xmm2, %xmm2
cmpl $12, %edx
jne L_AES_GCM_encrypt_aesni_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
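        # For a 96-bit IV, J0 = IV || 0x00000001; $0x1000000 is the value 1
        # in big-endian byte order within the final 32-bit word.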
movl $0x1000000, %ecx
pinsrd $0x00, (%esi), %xmm0
pinsrd $0x01, 4(%esi), %xmm0
pinsrd $2, 8(%esi), %xmm0
pinsrd $3, %ecx, %xmm0
# H = Encrypt X(=0) and T = Encrypt counter
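        # xmm1 becomes the hash subkey H = AES-Enc_K(0^128) (state 0 XOR rk0
        # is just rk0), xmm5 becomes E_K(J0), the final tag mask; both share
        # the key schedule, so their rounds are interleaved.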
movdqa %xmm0, %xmm5
movdqa (%ebp), %xmm1
pxor %xmm1, %xmm5
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_calc_iv_12_last
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_calc_iv_12_last
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_aesni_calc_iv_12_last:
aesenclast %xmm3, %xmm1
aesenclast %xmm3, %xmm5
pshufb L_aes_gcm_bswap_mask, %xmm1
movdqu %xmm5, 80(%esp)
jmp L_AES_GCM_encrypt_aesni_iv_done
L_AES_GCM_encrypt_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes
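        # For |IV| != 96 bits, J0 = GHASH_H(IV padded to a full block, with
        # the 64-bit IV bit length appended), per NIST SP 800-38D.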
# H = Encrypt X(=0)
movdqa (%ebp), %xmm1
aesenc 16(%ebp), %xmm1
aesenc 32(%ebp), %xmm1
aesenc 48(%ebp), %xmm1
aesenc 64(%ebp), %xmm1
aesenc 80(%ebp), %xmm1
aesenc 96(%ebp), %xmm1
aesenc 112(%ebp), %xmm1
aesenc 128(%ebp), %xmm1
aesenc 144(%ebp), %xmm1
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm5, %xmm1
aesenc 176(%ebp), %xmm1
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm5, %xmm1
aesenc 208(%ebp), %xmm1
movdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm5, %xmm1
pshufb L_aes_gcm_bswap_mask, %xmm1
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_encrypt_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_aesni_calc_iv_16_loop:
movdqu (%esi,%ecx,1), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm0
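        # X = X * H in GF(2^128): Karatsuba-style carry-less multiply (three
        # pclmulqdq), assemble the 256-bit product, shift it left one bit
        # (reflected-bit convention), then reduce modulo
        # x^128 + x^7 + x^2 + x + 1.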
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_iv_16_loop
movl 160(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_aesni_calc_iv_done
L_AES_GCM_encrypt_aesni_calc_iv_lt16:
subl $16, %esp
pxor %xmm4, %xmm4
xorl %ebx, %ebx
movdqu %xmm4, (%esp)
L_AES_GCM_encrypt_aesni_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_iv_loop
movdqu (%esp), %xmm4
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm0
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
L_AES_GCM_encrypt_aesni_calc_iv_done:
# T = Encrypt counter
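# For IVs that are not 12 bytes, GCM derives J0 = GHASH(IV): the shll $3 /
# pinsrd below folds the 64-bit IV bit length into the final GHASH block
# before the last multiply, and the result is then byte-swapped into
# counter form.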
pxor %xmm4, %xmm4
shll $3, %edx
pinsrd $0x00, %edx, %xmm4
pxor %xmm4, %xmm0
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
# Encrypt counter
movdqa (%ebp), %xmm4
pxor %xmm0, %xmm4
aesenc 16(%ebp), %xmm4
aesenc 32(%ebp), %xmm4
aesenc 48(%ebp), %xmm4
aesenc 64(%ebp), %xmm4
aesenc 80(%ebp), %xmm4
aesenc 96(%ebp), %xmm4
aesenc 112(%ebp), %xmm4
aesenc 128(%ebp), %xmm4
aesenc 144(%ebp), %xmm4
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 176(%ebp), %xmm4
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 208(%ebp), %xmm4
movdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm5, %xmm4
movdqu %xmm4, 80(%esp)
L_AES_GCM_encrypt_aesni_iv_done:
movl 140(%esp), %esi
# Additional authentication data
movl 156(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_aesni_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_aesni_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_aesni_calc_aad_16_loop:
movdqu (%esi,%ecx,1), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm2, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm2, %xmm7
pclmulqdq $0x00, %xmm2, %xmm4
pxor %xmm2, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm2, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm2
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm2
por %xmm4, %xmm3
por %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm2
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_aad_16_loop
movl 156(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_aesni_calc_aad_done
L_AES_GCM_encrypt_aesni_calc_aad_lt16:
subl $16, %esp
pxor %xmm4, %xmm4
xorl %ebx, %ebx
movdqu %xmm4, (%esp)
L_AES_GCM_encrypt_aesni_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_aesni_calc_aad_loop
movdqu (%esp), %xmm4
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm2, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm2, %xmm7
pclmulqdq $0x00, %xmm2, %xmm4
pxor %xmm2, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm2, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm2
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm2
por %xmm4, %xmm3
por %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm2
L_AES_GCM_encrypt_aesni_calc_aad_done:
movdqu %xmm2, 96(%esp)
movl 132(%esp), %esi
movl 136(%esp), %edi
# Calculate counter and H
pshufb L_aes_gcm_bswap_epi64, %xmm0
movdqa %xmm1, %xmm5
paddd L_aes_gcm_one, %xmm0
movdqa %xmm1, %xmm4
movdqu %xmm0, 64(%esp)
psrlq $63, %xmm5
psllq $0x01, %xmm4
pslldq $8, %xmm5
por %xmm5, %xmm4
pshufd $0xff, %xmm1, %xmm1
psrad $31, %xmm1
pand L_aes_gcm_mod2_128, %xmm1
pxor %xmm4, %xmm1
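# The psllq/psrlq/pslldq sequence above shifts H left by one bit across the
# full 128 bits (multiply by x); pshufd $0xff + psrad $31 broadcasts the
# shifted-out top bit as a mask so pand/pxor conditionally applies
# L_aes_gcm_mod2_128, leaving H in the pre-shifted form this
# pclmulqdq-based GHASH expects.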
xorl %ebx, %ebx
movl 152(%esp), %eax
cmpl $0x40, %eax
jl L_AES_GCM_encrypt_aesni_done_64
andl $0xffffffc0, %eax
movdqa %xmm2, %xmm6
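# Precompute H^1..H^4 at (%esp)..48(%esp): the 64-byte main loop below
# aggregates four GHASH blocks per iteration as
# c1*H^4 ^ c2*H^3 ^ c3*H^2 ^ c4*H^1, deferring reduction until the four
# partial products have been summed.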
# H ^ 1
movdqu %xmm1, (%esp)
# H ^ 2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm0
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm0
movdqu %xmm0, 16(%esp)
# H ^ 3
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm0, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm0, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm0, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm3
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm3
movdqu %xmm3, 32(%esp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm0, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm0, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm0, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm3
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm3
movdqu %xmm3, 48(%esp)
# First 64 bytes of input
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm4
movdqa L_aes_gcm_bswap_epi64, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pshufb %xmm3, %xmm4
paddd L_aes_gcm_one, %xmm5
pshufb %xmm3, %xmm5
paddd L_aes_gcm_two, %xmm6
pshufb %xmm3, %xmm6
paddd L_aes_gcm_three, %xmm7
pshufb %xmm3, %xmm7
movdqu 64(%esp), %xmm3
paddd L_aes_gcm_four, %xmm3
movdqu %xmm3, 64(%esp)
movdqa (%ebp), %xmm3
pxor %xmm3, %xmm4
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm3, %xmm7
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_enc_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_enc_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_aesni_enc_done:
aesenclast %xmm3, %xmm4
aesenclast %xmm3, %xmm5
movdqu (%esi), %xmm0
movdqu 16(%esi), %xmm1
pxor %xmm0, %xmm4
pxor %xmm1, %xmm5
movdqu %xmm4, (%edi)
movdqu %xmm5, 16(%edi)
aesenclast %xmm3, %xmm6
aesenclast %xmm3, %xmm7
movdqu 32(%esi), %xmm0
movdqu 48(%esi), %xmm1
pxor %xmm0, %xmm6
pxor %xmm1, %xmm7
movdqu %xmm6, 32(%edi)
movdqu %xmm7, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
movl %esi, %ecx
movl %edi, %edx
jle L_AES_GCM_encrypt_aesni_end_64
# More 64 bytes of input
L_AES_GCM_encrypt_aesni_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm4
movdqa L_aes_gcm_bswap_epi64, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pshufb %xmm3, %xmm4
paddd L_aes_gcm_one, %xmm5
pshufb %xmm3, %xmm5
paddd L_aes_gcm_two, %xmm6
pshufb %xmm3, %xmm6
paddd L_aes_gcm_three, %xmm7
pshufb %xmm3, %xmm7
movdqu 64(%esp), %xmm3
paddd L_aes_gcm_four, %xmm3
movdqu %xmm3, 64(%esp)
movdqa (%ebp), %xmm3
pxor %xmm3, %xmm4
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm3, %xmm7
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_aesni_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_aesni_aesenc_64_ghash_avx_done:
aesenclast %xmm3, %xmm4
aesenclast %xmm3, %xmm5
movdqu (%ecx), %xmm0
movdqu 16(%ecx), %xmm1
pxor %xmm0, %xmm4
pxor %xmm1, %xmm5
movdqu %xmm4, (%edx)
movdqu %xmm5, 16(%edx)
aesenclast %xmm3, %xmm6
aesenclast %xmm3, %xmm7
movdqu 32(%ecx), %xmm0
movdqu 48(%ecx), %xmm1
pxor %xmm0, %xmm6
pxor %xmm1, %xmm7
movdqu %xmm6, 32(%edx)
movdqu %xmm7, 48(%edx)
# ghash encrypted counter
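# Aggregated GHASH of the previous 64 bytes of ciphertext: (accumulator ^
# c1) is multiplied by H^4 and c2..c4 by H^3..H^1; the partial products
# collect in xmm5/xmm6/xmm7 and one shift-and-xor reduction at the end of
# the iteration folds the 256-bit sum back to 128 bits.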
movdqu 96(%esp), %xmm6
movdqu 48(%esp), %xmm3
movdqu -64(%edx), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm6, %xmm4
pshufd $0x4e, %xmm3, %xmm5
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm3, %xmm5
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm7
pclmulqdq $0x11, %xmm3, %xmm7
movdqa %xmm4, %xmm6
pclmulqdq $0x00, %xmm3, %xmm6
pclmulqdq $0x00, %xmm1, %xmm5
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqu 32(%esp), %xmm3
movdqu -48(%edx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu 16(%esp), %xmm3
movdqu -32(%edx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu (%esp), %xmm3
movdqu -16(%edx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqa %xmm5, %xmm1
psrldq $8, %xmm5
pslldq $8, %xmm1
pxor %xmm1, %xmm6
pxor %xmm5, %xmm7
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
pslld $31, %xmm3
pslld $30, %xmm0
pslld $25, %xmm1
pxor %xmm0, %xmm3
pxor %xmm1, %xmm3
movdqa %xmm3, %xmm0
pslldq $12, %xmm3
psrldq $4, %xmm0
pxor %xmm3, %xmm6
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm5
movdqa %xmm6, %xmm4
psrld $0x01, %xmm1
psrld $2, %xmm5
psrld $7, %xmm4
pxor %xmm5, %xmm1
pxor %xmm4, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm6
pxor %xmm7, %xmm6
movdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_aesni_ghash_64
L_AES_GCM_encrypt_aesni_end_64:
movdqu 96(%esp), %xmm2
# Block 1
movdqa L_aes_gcm_bswap_mask, %xmm4
movdqu (%edx), %xmm1
pshufb %xmm4, %xmm1
movdqu 48(%esp), %xmm3
pxor %xmm2, %xmm1
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm3, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm0
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm0
pxor %xmm5, %xmm2
# Block 2
movdqa L_aes_gcm_bswap_mask, %xmm4
movdqu 16(%edx), %xmm1
pshufb %xmm4, %xmm1
movdqu 32(%esp), %xmm3
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm3, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
pxor %xmm4, %xmm0
pxor %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm0
pxor %xmm5, %xmm2
# Block 3
movdqa L_aes_gcm_bswap_mask, %xmm4
movdqu 32(%edx), %xmm1
pshufb %xmm4, %xmm1
movdqu 16(%esp), %xmm3
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm3, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
pxor %xmm4, %xmm0
pxor %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm0
pxor %xmm5, %xmm2
# Block 4
movdqa L_aes_gcm_bswap_mask, %xmm4
movdqu 48(%edx), %xmm1
pshufb %xmm4, %xmm1
movdqu (%esp), %xmm3
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm3, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
pxor %xmm4, %xmm0
pxor %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm0
pxor %xmm5, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm0, %xmm5
movdqa %xmm0, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm0
movdqa %xmm0, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm0, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm0, %xmm6
pxor %xmm6, %xmm2
movdqu (%esp), %xmm1
L_AES_GCM_encrypt_aesni_done_64:
movl 152(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_aesni_done_enc
movl 152(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_aesni_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu 64(%esp), %xmm4
movdqa %xmm4, %xmm5
pshufb L_aes_gcm_bswap_epi64, %xmm4
paddd L_aes_gcm_one, %xmm5
pxor (%ebp), %xmm4
movdqu %xmm5, 64(%esp)
aesenc 16(%ebp), %xmm4
aesenc 32(%ebp), %xmm4
aesenc 48(%ebp), %xmm4
aesenc 64(%ebp), %xmm4
aesenc 80(%ebp), %xmm4
aesenc 96(%ebp), %xmm4
aesenc 112(%ebp), %xmm4
aesenc 128(%ebp), %xmm4
aesenc 144(%ebp), %xmm4
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 176(%ebp), %xmm4
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 208(%ebp), %xmm4
movdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm5, %xmm4
movdqu (%ecx), %xmm5
pxor %xmm5, %xmm4
movdqu %xmm4, (%edx)
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_aesni_last_block_ghash
L_AES_GCM_encrypt_aesni_last_block_start:
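# Remaining full blocks: each iteration encrypts one CTR block while
# interleaving the GHASH multiply of the previous ciphertext block (held in
# xmm2) between aesenc rounds; here reduction is done with two
# pclmulqdq $16 folds against L_aes_gcm_mod2_128 rather than the shift
# ladder.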
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu 64(%esp), %xmm4
movdqa %xmm4, %xmm5
pshufb L_aes_gcm_bswap_epi64, %xmm4
paddd L_aes_gcm_one, %xmm5
pxor (%ebp), %xmm4
movdqu %xmm5, 64(%esp)
movdqu %xmm2, %xmm0
pclmulqdq $16, %xmm1, %xmm0
aesenc 16(%ebp), %xmm4
aesenc 32(%ebp), %xmm4
movdqu %xmm2, %xmm3
pclmulqdq $0x01, %xmm1, %xmm3
aesenc 48(%ebp), %xmm4
aesenc 64(%ebp), %xmm4
aesenc 80(%ebp), %xmm4
movdqu %xmm2, %xmm5
pclmulqdq $0x11, %xmm1, %xmm5
aesenc 96(%ebp), %xmm4
pxor %xmm3, %xmm0
movdqa %xmm0, %xmm6
psrldq $8, %xmm0
pslldq $8, %xmm6
aesenc 112(%ebp), %xmm4
movdqu %xmm2, %xmm3
pclmulqdq $0x00, %xmm1, %xmm3
pxor %xmm3, %xmm6
pxor %xmm0, %xmm5
movdqa L_aes_gcm_mod2_128, %xmm7
movdqa %xmm6, %xmm3
pclmulqdq $16, %xmm7, %xmm3
aesenc 128(%ebp), %xmm4
pshufd $0x4e, %xmm6, %xmm0
pxor %xmm3, %xmm0
movdqa %xmm0, %xmm3
pclmulqdq $16, %xmm7, %xmm3
aesenc 144(%ebp), %xmm4
pshufd $0x4e, %xmm0, %xmm2
pxor %xmm3, %xmm2
pxor %xmm5, %xmm2
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_gfmul_last
aesenc %xmm5, %xmm4
aesenc 176(%ebp), %xmm4
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_gfmul_last
aesenc %xmm5, %xmm4
aesenc 208(%ebp), %xmm4
movdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_aesni_aesenc_gfmul_last:
aesenclast %xmm5, %xmm4
movdqu (%ecx), %xmm5
pxor %xmm5, %xmm4
movdqu %xmm4, (%edx)
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_aesni_last_block_start
L_AES_GCM_encrypt_aesni_last_block_ghash:
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm2, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm2, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm2
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
L_AES_GCM_encrypt_aesni_last_block_done:
movl 152(%esp), %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_done
movdqu 64(%esp), %xmm0
pshufb L_aes_gcm_bswap_epi64, %xmm0
pxor (%ebp), %xmm0
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
aesenc 96(%ebp), %xmm0
aesenc 112(%ebp), %xmm0
aesenc 128(%ebp), %xmm0
aesenc 144(%ebp), %xmm0
cmpl $11, 172(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last
aesenc %xmm5, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 172(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last
aesenc %xmm5, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_aesenc_avx_last:
aesenclast %xmm5, %xmm0
subl $16, %esp
xorl %ecx, %ecx
movdqu %xmm0, (%esp)
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_loop:
movzbl (%esi,%ebx,1), %eax
xorb (%esp,%ecx,1), %al
movb %al, (%edi,%ebx,1)
movb %al, (%esp,%ecx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_loop
xorl %eax, %eax
cmpl $16, %ecx
je L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_finish_enc
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_byte_loop:
movb %al, (%esp,%ecx,1)
incl %ecx
cmpl $16, %ecx
jl L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_byte_loop
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_finish_enc:
movdqu (%esp), %xmm0
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm2, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm2, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm2
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
L_AES_GCM_encrypt_aesni_aesenc_last15_enc_avx_done:
L_AES_GCM_encrypt_aesni_done_enc:
movl 148(%esp), %edi
movl 164(%esp), %ebx
movl 152(%esp), %edx
movl 156(%esp), %ecx
shll $3, %edx
shll $3, %ecx
pinsrd $0x00, %edx, %xmm4
pinsrd $2, %ecx, %xmm4
movl 152(%esp), %edx
movl 156(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
pinsrd $0x01, %edx, %xmm4
pinsrd $3, %ecx, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm2, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm2, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm2
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pshufb L_aes_gcm_bswap_mask, %xmm2
movdqu 80(%esp), %xmm4
pxor %xmm2, %xmm4
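# Tag = E_K(J0) (cached at 80(%esp)) XOR the byte-swapped final GHASH
# value; it is stored whole when tbytes (164(%esp), now in %ebx) is 16,
# otherwise copied out a byte at a time.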
cmpl $16, %ebx
je L_AES_GCM_encrypt_aesni_store_tag_16
xorl %ecx, %ecx
movdqu %xmm4, (%esp)
L_AES_GCM_encrypt_aesni_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl %ebx, %ecx
jne L_AES_GCM_encrypt_aesni_store_tag_loop
jmp L_AES_GCM_encrypt_aesni_store_tag_done
L_AES_GCM_encrypt_aesni_store_tag_16:
movdqu %xmm4, (%edi)
L_AES_GCM_encrypt_aesni_store_tag_done:
addl $0x70, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_aesni,.-AES_GCM_encrypt_aesni
.text
.globl AES_GCM_decrypt_aesni
.type AES_GCM_decrypt_aesni,@function
.align 16
AES_GCM_decrypt_aesni:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xb0, %esp
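# Stack layout: 4 pushes (16) + 0xb0 frame (176) put the first argument at
# 196(%esp), consistent with wolfSSL's AES_GCM_decrypt(in, out, addt, ivec,
# tag, nbytes, abytes, ibytes, tbytes, key, nr, res):
#   196=in 200=out 204=addt 208=ivec 212=tag 216=nbytes 220=abytes
#   224=ibytes 228=tbytes 232=key schedule 236=nr 240=res (0/1 tag match)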
movl 208(%esp), %esi
movl 232(%esp), %ebp
movl 224(%esp), %edx
pxor %xmm0, %xmm0
pxor %xmm2, %xmm2
cmpl $12, %edx
jne L_AES_GCM_decrypt_aesni_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
pinsrd $0x00, (%esi), %xmm0
pinsrd $0x01, 4(%esi), %xmm0
pinsrd $2, 8(%esi), %xmm0
pinsrd $3, %ecx, %xmm0
# H = Encrypt X(=0) and T = Encrypt counter
movdqa %xmm0, %xmm5
movdqa (%ebp), %xmm1
pxor %xmm1, %xmm5
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesni_calc_iv_12_last
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesni_calc_iv_12_last
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm1
aesenc %xmm3, %xmm5
movdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_aesni_calc_iv_12_last:
aesenclast %xmm3, %xmm1
aesenclast %xmm3, %xmm5
pshufb L_aes_gcm_bswap_mask, %xmm1
movdqu %xmm5, 80(%esp)
jmp L_AES_GCM_decrypt_aesni_iv_done
L_AES_GCM_decrypt_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
movdqa (%ebp), %xmm1
aesenc 16(%ebp), %xmm1
aesenc 32(%ebp), %xmm1
aesenc 48(%ebp), %xmm1
aesenc 64(%ebp), %xmm1
aesenc 80(%ebp), %xmm1
aesenc 96(%ebp), %xmm1
aesenc 112(%ebp), %xmm1
aesenc 128(%ebp), %xmm1
aesenc 144(%ebp), %xmm1
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm5, %xmm1
aesenc 176(%ebp), %xmm1
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm5, %xmm1
aesenc 208(%ebp), %xmm1
movdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm5, %xmm1
pshufb L_aes_gcm_bswap_mask, %xmm1
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_decrypt_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_aesni_calc_iv_16_loop:
movdqu (%esi,%ecx,1), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm0
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_iv_16_loop
movl 224(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_aesni_calc_iv_done
L_AES_GCM_decrypt_aesni_calc_iv_lt16:
subl $16, %esp
pxor %xmm4, %xmm4
xorl %ebx, %ebx
movdqu %xmm4, (%esp)
L_AES_GCM_decrypt_aesni_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_iv_loop
movdqu (%esp), %xmm4
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm0
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
L_AES_GCM_decrypt_aesni_calc_iv_done:
# T = Encrypt counter
pxor %xmm4, %xmm4
shll $3, %edx
pinsrd $0x00, %edx, %xmm4
pxor %xmm4, %xmm0
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm0, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm0
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm0
por %xmm4, %xmm3
por %xmm5, %xmm0
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
# Encrypt counter
movdqa (%ebp), %xmm4
pxor %xmm0, %xmm4
aesenc 16(%ebp), %xmm4
aesenc 32(%ebp), %xmm4
aesenc 48(%ebp), %xmm4
aesenc 64(%ebp), %xmm4
aesenc 80(%ebp), %xmm4
aesenc 96(%ebp), %xmm4
aesenc 112(%ebp), %xmm4
aesenc 128(%ebp), %xmm4
aesenc 144(%ebp), %xmm4
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 176(%ebp), %xmm4
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm5, %xmm4
aesenc 208(%ebp), %xmm4
movdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm5, %xmm4
movdqu %xmm4, 80(%esp)
L_AES_GCM_decrypt_aesni_iv_done:
movl 204(%esp), %esi
# Additional authentication data
movl 220(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_aesni_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_aesni_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_aesni_calc_aad_16_loop:
movdqu (%esi,%ecx,1), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm2, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm2, %xmm7
pclmulqdq $0x00, %xmm2, %xmm4
pxor %xmm2, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm2, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm2
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm2
por %xmm4, %xmm3
por %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm2
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_aad_16_loop
movl 220(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_aesni_calc_aad_done
L_AES_GCM_decrypt_aesni_calc_aad_lt16:
subl $16, %esp
pxor %xmm4, %xmm4
xorl %ebx, %ebx
movdqu %xmm4, (%esp)
L_AES_GCM_decrypt_aesni_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_aesni_calc_aad_loop
movdqu (%esp), %xmm4
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm2, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm2, %xmm7
pclmulqdq $0x00, %xmm2, %xmm4
pxor %xmm2, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm4, %xmm3
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm3
pxor %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm2, %xmm5
psrld $31, %xmm4
psrld $31, %xmm5
pslld $0x01, %xmm3
pslld $0x01, %xmm2
movdqa %xmm4, %xmm6
pslldq $4, %xmm4
psrldq $12, %xmm6
pslldq $4, %xmm5
por %xmm6, %xmm2
por %xmm4, %xmm3
por %xmm5, %xmm2
movdqa %xmm3, %xmm4
movdqa %xmm3, %xmm5
movdqa %xmm3, %xmm6
pslld $31, %xmm4
pslld $30, %xmm5
pslld $25, %xmm6
pxor %xmm5, %xmm4
pxor %xmm6, %xmm4
movdqa %xmm4, %xmm5
psrldq $4, %xmm5
pslldq $12, %xmm4
pxor %xmm4, %xmm3
movdqa %xmm3, %xmm6
movdqa %xmm3, %xmm7
movdqa %xmm3, %xmm4
psrld $0x01, %xmm6
psrld $2, %xmm7
psrld $7, %xmm4
pxor %xmm7, %xmm6
pxor %xmm4, %xmm6
pxor %xmm5, %xmm6
pxor %xmm3, %xmm6
pxor %xmm6, %xmm2
L_AES_GCM_decrypt_aesni_calc_aad_done:
movdqu %xmm2, 96(%esp)
movl 196(%esp), %esi
movl 200(%esp), %edi
# Calculate counter and H
pshufb L_aes_gcm_bswap_epi64, %xmm0
movdqa %xmm1, %xmm5
paddd L_aes_gcm_one, %xmm0
movdqa %xmm1, %xmm4
movdqu %xmm0, 64(%esp)
psrlq $63, %xmm5
psllq $0x01, %xmm4
pslldq $8, %xmm5
por %xmm5, %xmm4
pshufd $0xff, %xmm1, %xmm1
psrad $31, %xmm1
pand L_aes_gcm_mod2_128, %xmm1
pxor %xmm4, %xmm1
xorl %ebx, %ebx
cmpl $0x40, 216(%esp)
movl 216(%esp), %eax
jl L_AES_GCM_decrypt_aesni_done_64
andl $0xffffffc0, %eax
movdqa %xmm2, %xmm6
# H ^ 1
movdqu %xmm1, (%esp)
# H ^ 2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm1, %xmm6
movdqa %xmm1, %xmm7
movdqa %xmm1, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm1, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm0
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm0
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm0
movdqu %xmm0, 16(%esp)
# H ^ 3
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm0, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm0, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm0, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm3
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm3
movdqu %xmm3, 32(%esp)
# H ^ 4
pshufd $0x4e, %xmm0, %xmm5
pshufd $0x4e, %xmm0, %xmm6
movdqa %xmm0, %xmm7
movdqa %xmm0, %xmm4
pclmulqdq $0x11, %xmm0, %xmm7
pclmulqdq $0x00, %xmm0, %xmm4
pxor %xmm0, %xmm5
pxor %xmm0, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm3
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm3
movdqu %xmm3, 48(%esp)
cmpl %esi, %edi
jne L_AES_GCM_decrypt_aesni_ghash_64
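# Decryption GHASHes the ciphertext, so it must survive being overwritten.
# When out == in, the in-place path below first copies each 64-byte chunk
# to 112..160(%esp) before storing plaintext; the out-of-place path simply
# re-reads the ciphertext from the source buffer.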
L_AES_GCM_decrypt_aesni_ghash_64_inplace:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm4
movdqa L_aes_gcm_bswap_epi64, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pshufb %xmm3, %xmm4
paddd L_aes_gcm_one, %xmm5
pshufb %xmm3, %xmm5
paddd L_aes_gcm_two, %xmm6
pshufb %xmm3, %xmm6
paddd L_aes_gcm_three, %xmm7
pshufb %xmm3, %xmm7
movdqu 64(%esp), %xmm3
paddd L_aes_gcm_four, %xmm3
movdqu %xmm3, 64(%esp)
movdqa (%ebp), %xmm3
pxor %xmm3, %xmm4
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm3, %xmm7
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesniinplace_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesniinplace_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_aesniinplace_aesenc_64_ghash_avx_done:
aesenclast %xmm3, %xmm4
aesenclast %xmm3, %xmm5
movdqu (%ecx), %xmm0
movdqu 16(%ecx), %xmm1
pxor %xmm0, %xmm4
pxor %xmm1, %xmm5
movdqu %xmm0, 112(%esp)
movdqu %xmm1, 128(%esp)
movdqu %xmm4, (%edx)
movdqu %xmm5, 16(%edx)
aesenclast %xmm3, %xmm6
aesenclast %xmm3, %xmm7
movdqu 32(%ecx), %xmm0
movdqu 48(%ecx), %xmm1
pxor %xmm0, %xmm6
pxor %xmm1, %xmm7
movdqu %xmm0, 144(%esp)
movdqu %xmm1, 160(%esp)
movdqu %xmm6, 32(%edx)
movdqu %xmm7, 48(%edx)
# ghash encrypted counter
movdqu 96(%esp), %xmm6
movdqu 48(%esp), %xmm3
movdqu 112(%esp), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm6, %xmm4
pshufd $0x4e, %xmm3, %xmm5
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm3, %xmm5
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm7
pclmulqdq $0x11, %xmm3, %xmm7
movdqa %xmm4, %xmm6
pclmulqdq $0x00, %xmm3, %xmm6
pclmulqdq $0x00, %xmm1, %xmm5
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqu 32(%esp), %xmm3
movdqu 128(%esp), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu 16(%esp), %xmm3
movdqu 144(%esp), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu (%esp), %xmm3
movdqu 160(%esp), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqa %xmm5, %xmm1
psrldq $8, %xmm5
pslldq $8, %xmm1
pxor %xmm1, %xmm6
pxor %xmm5, %xmm7
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
pslld $31, %xmm3
pslld $30, %xmm0
pslld $25, %xmm1
pxor %xmm0, %xmm3
pxor %xmm1, %xmm3
movdqa %xmm3, %xmm0
pslldq $12, %xmm3
psrldq $4, %xmm0
pxor %xmm3, %xmm6
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm5
movdqa %xmm6, %xmm4
psrld $0x01, %xmm1
psrld $2, %xmm5
psrld $7, %xmm4
pxor %xmm5, %xmm1
pxor %xmm4, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm6
pxor %xmm7, %xmm6
movdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_aesni_ghash_64_inplace
jmp L_AES_GCM_decrypt_aesni_ghash_64_done
L_AES_GCM_decrypt_aesni_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm4
movdqa L_aes_gcm_bswap_epi64, %xmm3
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pshufb %xmm3, %xmm4
paddd L_aes_gcm_one, %xmm5
pshufb %xmm3, %xmm5
paddd L_aes_gcm_two, %xmm6
pshufb %xmm3, %xmm6
paddd L_aes_gcm_three, %xmm7
pshufb %xmm3, %xmm7
movdqu 64(%esp), %xmm3
paddd L_aes_gcm_four, %xmm3
movdqu %xmm3, 64(%esp)
movdqa (%ebp), %xmm3
pxor %xmm3, %xmm4
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm3, %xmm7
movdqa 16(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 32(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 48(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 64(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 80(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 96(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 112(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 128(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 144(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesni_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 176(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_aesni_aesenc_64_ghash_avx_done
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 208(%ebp), %xmm3
aesenc %xmm3, %xmm4
aesenc %xmm3, %xmm5
aesenc %xmm3, %xmm6
aesenc %xmm3, %xmm7
movdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_aesni_aesenc_64_ghash_avx_done:
aesenclast %xmm3, %xmm4
aesenclast %xmm3, %xmm5
movdqu (%ecx), %xmm0
movdqu 16(%ecx), %xmm1
pxor %xmm0, %xmm4
pxor %xmm1, %xmm5
movdqu %xmm0, (%ecx)
movdqu %xmm1, 16(%ecx)
movdqu %xmm4, (%edx)
movdqu %xmm5, 16(%edx)
aesenclast %xmm3, %xmm6
aesenclast %xmm3, %xmm7
movdqu 32(%ecx), %xmm0
movdqu 48(%ecx), %xmm1
pxor %xmm0, %xmm6
pxor %xmm1, %xmm7
movdqu %xmm0, 32(%ecx)
movdqu %xmm1, 48(%ecx)
movdqu %xmm6, 32(%edx)
movdqu %xmm7, 48(%edx)
# ghash encrypted counter
movdqu 96(%esp), %xmm6
movdqu 48(%esp), %xmm3
movdqu (%ecx), %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm6, %xmm4
pshufd $0x4e, %xmm3, %xmm5
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm3, %xmm5
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm7
pclmulqdq $0x11, %xmm3, %xmm7
movdqa %xmm4, %xmm6
pclmulqdq $0x00, %xmm3, %xmm6
pclmulqdq $0x00, %xmm1, %xmm5
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqu 32(%esp), %xmm3
movdqu 16(%ecx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu 16(%esp), %xmm3
movdqu 32(%ecx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqu (%esp), %xmm3
movdqu 48(%ecx), %xmm4
pshufd $0x4e, %xmm3, %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm4
pxor %xmm3, %xmm0
pshufd $0x4e, %xmm4, %xmm1
pxor %xmm4, %xmm1
movdqa %xmm4, %xmm2
pclmulqdq $0x11, %xmm3, %xmm2
pclmulqdq $0x00, %xmm4, %xmm3
pclmulqdq $0x00, %xmm1, %xmm0
pxor %xmm3, %xmm5
pxor %xmm3, %xmm6
pxor %xmm2, %xmm5
pxor %xmm2, %xmm7
pxor %xmm0, %xmm5
movdqa %xmm5, %xmm1
psrldq $8, %xmm5
pslldq $8, %xmm1
pxor %xmm1, %xmm6
pxor %xmm5, %xmm7
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
pslld $31, %xmm3
pslld $30, %xmm0
pslld $25, %xmm1
pxor %xmm0, %xmm3
pxor %xmm1, %xmm3
movdqa %xmm3, %xmm0
pslldq $12, %xmm3
psrldq $4, %xmm0
pxor %xmm3, %xmm6
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm5
movdqa %xmm6, %xmm4
psrld $0x01, %xmm1
psrld $2, %xmm5
psrld $7, %xmm4
pxor %xmm5, %xmm1
pxor %xmm4, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm6
pxor %xmm7, %xmm6
movdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_aesni_ghash_64
L_AES_GCM_decrypt_aesni_ghash_64_done:
movdqa %xmm6, %xmm2
movdqu (%esp), %xmm1
L_AES_GCM_decrypt_aesni_done_64:
movl 216(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_aesni_done_dec
movl 216(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_aesni_last_block_done
L_AES_GCM_decrypt_aesni_last_block_start:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu (%ecx), %xmm5
pshufb L_aes_gcm_bswap_mask, %xmm5
pxor %xmm2, %xmm5
movdqu %xmm5, (%esp)
movdqu 64(%esp), %xmm4
movdqa %xmm4, %xmm5
pshufb L_aes_gcm_bswap_epi64, %xmm4
paddd L_aes_gcm_one, %xmm5
pxor (%ebp), %xmm4
movdqu %xmm5, 64(%esp)
movdqu (%esp), %xmm0
pclmulqdq $16, %xmm1, %xmm0
aesenc 16(%ebp), %xmm4
aesenc 32(%ebp), %xmm4
movdqu (%esp), %xmm3
pclmulqdq $0x01, %xmm1, %xmm3
aesenc 48(%ebp), %xmm4
aesenc 64(%ebp), %xmm4
aesenc 80(%ebp), %xmm4
movdqu (%esp), %xmm5
pclmulqdq $0x11, %xmm1, %xmm5
aesenc 96(%ebp), %xmm4
pxor %xmm3, %xmm0
movdqa %xmm0, %xmm6
psrldq $8, %xmm0
pslldq $8, %xmm6
aesenc 112(%ebp), %xmm4
movdqu (%esp), %xmm3
pclmulqdq $0x00, %xmm1, %xmm3
pxor %xmm3, %xmm6
pxor %xmm0, %xmm5
movdqa L_aes_gcm_mod2_128, %xmm7
movdqa %xmm6, %xmm3
pclmulqdq $16, %xmm7, %xmm3
aesenc 128(%ebp), %xmm4
pshufd $0x4e, %xmm6, %xmm0
pxor %xmm3, %xmm0
movdqa %xmm0, %xmm3
pclmulqdq $16, %xmm7, %xmm3
aesenc 144(%ebp), %xmm4
pshufd $0x4e, %xmm0, %xmm2
pxor %xmm3, %xmm2
pxor %xmm5, %xmm2
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_aesenc_gfmul_last
aesenc %xmm5, %xmm4
aesenc 176(%ebp), %xmm4
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_aesenc_gfmul_last
aesenc %xmm5, %xmm4
aesenc 208(%ebp), %xmm4
movdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_aesni_aesenc_gfmul_last:
aesenclast %xmm5, %xmm4
movdqu (%ecx), %xmm5
pxor %xmm5, %xmm4
movdqu %xmm4, (%edx)
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_aesni_last_block_start
L_AES_GCM_decrypt_aesni_last_block_done:
movl 216(%esp), %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_done
movdqu 64(%esp), %xmm0
pshufb L_aes_gcm_bswap_epi64, %xmm0
pxor (%ebp), %xmm0
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
aesenc 96(%ebp), %xmm0
aesenc 112(%ebp), %xmm0
aesenc 128(%ebp), %xmm0
aesenc 144(%ebp), %xmm0
cmpl $11, 236(%esp)
movdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last
aesenc %xmm5, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 236(%esp)
movdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last
aesenc %xmm5, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_aesenc_avx_last:
aesenclast %xmm5, %xmm0
subl $32, %esp
xorl %ecx, %ecx
movdqu %xmm0, (%esp)
pxor %xmm4, %xmm4
movdqu %xmm4, 16(%esp)
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_loop:
movzbl (%esi,%ebx,1), %eax
movb %al, 16(%esp,%ecx,1)
xorb (%esp,%ecx,1), %al
movb %al, (%edi,%ebx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_loop
movdqu 16(%esp), %xmm0
addl $32, %esp
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm2, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm2, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm2
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
L_AES_GCM_decrypt_aesni_aesenc_last15_dec_avx_done:
L_AES_GCM_decrypt_aesni_done_dec:
movl 212(%esp), %esi
movl 228(%esp), %ebp
movl 216(%esp), %edx
movl 220(%esp), %ecx
shll $3, %edx
shll $3, %ecx
pinsrd $0x00, %edx, %xmm4
pinsrd $2, %ecx, %xmm4
movl 216(%esp), %edx
movl 220(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
pinsrd $0x01, %edx, %xmm4
pinsrd $3, %ecx, %xmm4
pxor %xmm4, %xmm2
pshufd $0x4e, %xmm1, %xmm5
pshufd $0x4e, %xmm2, %xmm6
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
pclmulqdq $0x11, %xmm1, %xmm7
pclmulqdq $0x00, %xmm1, %xmm4
pxor %xmm1, %xmm5
pxor %xmm2, %xmm6
pclmulqdq $0x00, %xmm6, %xmm5
pxor %xmm4, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm6
movdqa %xmm7, %xmm2
pslldq $8, %xmm6
psrldq $8, %xmm5
pxor %xmm6, %xmm4
pxor %xmm5, %xmm2
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
movdqa %xmm4, %xmm7
pslld $31, %xmm5
pslld $30, %xmm6
pslld $25, %xmm7
pxor %xmm6, %xmm5
pxor %xmm7, %xmm5
movdqa %xmm5, %xmm7
psrldq $4, %xmm7
pslldq $12, %xmm5
pxor %xmm5, %xmm4
movdqa %xmm4, %xmm5
movdqa %xmm4, %xmm6
psrld $0x01, %xmm5
psrld $2, %xmm6
pxor %xmm6, %xmm5
pxor %xmm4, %xmm5
psrld $7, %xmm4
pxor %xmm7, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pshufb L_aes_gcm_bswap_mask, %xmm2
movdqu 80(%esp), %xmm4
pxor %xmm2, %xmm4
movl 240(%esp), %edi
cmpl $16, %ebp
je L_AES_GCM_decrypt_aesni_cmp_tag_16
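# Partial-length tag: compare byte-wise in constant time by OR-accumulating
# the XOR differences into %bl; sete then turns "no difference seen" into
# the 0/1 result written through res.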
subl $16, %esp
xorl %ecx, %ecx
xorl %ebx, %ebx
movdqu %xmm4, (%esp)
L_AES_GCM_decrypt_aesni_cmp_tag_loop:
movzbl (%esp,%ecx,1), %eax
xorb (%esi,%ecx,1), %al
orb %al, %bl
incl %ecx
cmpl %ebp, %ecx
jne L_AES_GCM_decrypt_aesni_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addl $16, %esp
xorl %ecx, %ecx
jmp L_AES_GCM_decrypt_aesni_cmp_tag_done
L_AES_GCM_decrypt_aesni_cmp_tag_16:
movdqu (%esi), %xmm5
pcmpeqb %xmm5, %xmm4
pmovmskb %xmm4, %edx
# if %edx == 0xffff, all 16 tag bytes matched: set result to 1, else 0
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_aesni_cmp_tag_done:
movl %ebx, (%edi)
addl $0xb0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_aesni,.-AES_GCM_decrypt_aesni
#ifdef WOLFSSL_AESGCM_STREAM
.text
.globl AES_GCM_init_aesni
.type AES_GCM_init_aesni,@function
.align 16
AES_GCM_init_aesni:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
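# Arguments start at 36(%esp) (16 bytes of pushed registers + the 16-byte
# frame). From the uses below: 36=key schedule, 40=nr (round count),
# 44=ivec, 48=ibytes; 60(%esp) appears to receive the encrypted initial
# counter block, and the remaining slots presumably carry the other output
# pointers of the streaming init entry point.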
movl 36(%esp), %ebp
movl 44(%esp), %esi
movl 60(%esp), %edi
pxor %xmm4, %xmm4
movl 48(%esp), %edx
cmpl $12, %edx
jne L_AES_GCM_init_aesni_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
pinsrd $0x00, (%esi), %xmm4
pinsrd $0x01, 4(%esi), %xmm4
pinsrd $2, 8(%esi), %xmm4
pinsrd $3, %ecx, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
movdqa %xmm4, %xmm1
movdqa (%ebp), %xmm5
pxor %xmm5, %xmm1
movdqa 16(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 32(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 48(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 64(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 80(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 96(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 112(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 128(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 144(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
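# Round-count dispatch: nr < 11 means AES-128 (10 rounds, last round key
# at 160(%ebp)); AES-192 and AES-256 run two and four extra rounds.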
cmpl $11, 40(%esp)
movdqa 160(%ebp), %xmm7
jl L_AES_GCM_init_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 176(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
cmpl $13, 40(%esp)
movdqa 192(%ebp), %xmm7
jl L_AES_GCM_init_aesni_calc_iv_12_last
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 208(%ebp), %xmm7
aesenc %xmm7, %xmm5
aesenc %xmm7, %xmm1
movdqa 224(%ebp), %xmm7
L_AES_GCM_init_aesni_calc_iv_12_last:
aesenclast %xmm7, %xmm5
aesenclast %xmm7, %xmm1
pshufb L_aes_gcm_bswap_mask, %xmm5
movdqu %xmm1, (%edi)
jmp L_AES_GCM_init_aesni_iv_done
L_AES_GCM_init_aesni_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
movdqa (%ebp), %xmm5
aesenc 16(%ebp), %xmm5
aesenc 32(%ebp), %xmm5
aesenc 48(%ebp), %xmm5
aesenc 64(%ebp), %xmm5
aesenc 80(%ebp), %xmm5
aesenc 96(%ebp), %xmm5
aesenc 112(%ebp), %xmm5
aesenc 128(%ebp), %xmm5
aesenc 144(%ebp), %xmm5
cmpl $11, 40(%esp)
movdqa 160(%ebp), %xmm1
jl L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm1, %xmm5
aesenc 176(%ebp), %xmm5
cmpl $13, 40(%esp)
movdqa 192(%ebp), %xmm1
jl L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last
aesenc %xmm1, %xmm5
aesenc 208(%ebp), %xmm5
movdqa 224(%ebp), %xmm1
L_AES_GCM_init_aesni_calc_iv_1_aesenc_avx_last:
aesenclast %xmm1, %xmm5
pshufb L_aes_gcm_bswap_mask, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_init_aesni_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_init_aesni_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_init_aesni_calc_iv_16_loop:
movdqu (%esi,%ecx,1), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_init_aesni_calc_iv_16_loop
movl 48(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_init_aesni_calc_iv_done
L_AES_GCM_init_aesni_calc_iv_lt16:
subl $16, %esp
pxor %xmm0, %xmm0
xorl %ebx, %ebx
movdqu %xmm0, (%esp)
L_AES_GCM_init_aesni_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_init_aesni_calc_iv_loop
movdqu (%esp), %xmm0
addl $16, %esp
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
L_AES_GCM_init_aesni_calc_iv_done:
# GHASH in the IV bit length to finish J0; T = Encrypt(J0) follows below
pxor %xmm0, %xmm0
shll $3, %edx
pinsrd $0x00, %edx, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm7
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm7
pxor %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm7
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm7
por %xmm1, %xmm4
movdqa %xmm7, %xmm0
movdqa %xmm7, %xmm1
movdqa %xmm7, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm7
movdqa %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm7, %xmm2
pxor %xmm2, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
# Encrypt counter
movdqa (%ebp), %xmm0
pxor %xmm4, %xmm0
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
aesenc 96(%ebp), %xmm0
aesenc 112(%ebp), %xmm0
aesenc 128(%ebp), %xmm0
aesenc 144(%ebp), %xmm0
cmpl $11, 40(%esp)
movdqa 160(%ebp), %xmm1
jl L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 40(%esp)
movdqa 192(%ebp), %xmm1
jl L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm1
L_AES_GCM_init_aesni_calc_iv_2_aesenc_avx_last:
aesenclast %xmm1, %xmm0
movdqu %xmm0, (%edi)
L_AES_GCM_init_aesni_iv_done:
movl 52(%esp), %ebp
movl 56(%esp), %edi
pshufb L_aes_gcm_bswap_epi64, %xmm4
paddd L_aes_gcm_one, %xmm4
movdqa %xmm5, (%ebp)
movdqa %xmm4, (%edi)
addl $16, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_init_aesni,.-AES_GCM_init_aesni
.text
.globl AES_GCM_aad_update_aesni
.type AES_GCM_aad_update_aesni,@function
.align 16
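# AES_GCM_aad_update_aesni: fold 16-byte blocks of AAD into the running
# GHASH state, X = (X ^ block) * H in GF(2^128), one Karatsuba pclmulqdq
# multiply plus reduction per block. Argument layout inferred from the
# stack offsets below.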
AES_GCM_aad_update_aesni:
pushl %esi
pushl %edi
movl 12(%esp), %esi
movl 16(%esp), %edx
movl 20(%esp), %edi
movl 24(%esp), %eax
movdqa (%edi), %xmm5
movdqa (%eax), %xmm6
xorl %ecx, %ecx
L_AES_GCM_aad_update_aesni_16_loop:
movdqu (%esi,%ecx,1), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm5
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm3, %xmm5
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm5
movdqa %xmm4, %xmm0
movdqa %xmm5, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm4
pslld $0x01, %xmm5
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm5
por %xmm0, %xmm4
por %xmm1, %xmm5
movdqa %xmm4, %xmm0
movdqa %xmm4, %xmm1
movdqa %xmm4, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm4
movdqa %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm4, %xmm2
pxor %xmm2, %xmm5
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_aad_update_aesni_16_loop
movdqa %xmm5, (%edi)
popl %edi
popl %esi
ret
.size AES_GCM_aad_update_aesni,.-AES_GCM_aad_update_aesni
.text
.globl AES_GCM_encrypt_block_aesni
.type AES_GCM_encrypt_block_aesni,@function
.align 16
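# AES_GCM_encrypt_block_aesni: encrypt a single 16-byte block in CTR
# mode; the counter is byte-swapped for the AES input while the stored
# copy is incremented (paddd L_aes_gcm_one) and written back before the
# rounds run.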
AES_GCM_encrypt_block_aesni:
pushl %esi
pushl %edi
movl 12(%esp), %ecx
movl 16(%esp), %eax
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
movdqu (%edx), %xmm0
movdqa %xmm0, %xmm1
pshufb L_aes_gcm_bswap_epi64, %xmm0
paddd L_aes_gcm_one, %xmm1
pxor (%ecx), %xmm0
movdqu %xmm1, (%edx)
aesenc 16(%ecx), %xmm0
aesenc 32(%ecx), %xmm0
aesenc 48(%ecx), %xmm0
aesenc 64(%ecx), %xmm0
aesenc 80(%ecx), %xmm0
aesenc 96(%ecx), %xmm0
aesenc 112(%ecx), %xmm0
aesenc 128(%ecx), %xmm0
aesenc 144(%ecx), %xmm0
cmpl $11, %eax
movdqa 160(%ecx), %xmm1
jl L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 176(%ecx), %xmm0
cmpl $13, %eax
movdqa 192(%ecx), %xmm1
jl L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 208(%ecx), %xmm0
movdqa 224(%ecx), %xmm1
L_AES_GCM_encrypt_block_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm1, %xmm0
movdqu (%esi), %xmm1
pxor %xmm1, %xmm0
movdqu %xmm0, (%edi)
pshufb L_aes_gcm_bswap_mask, %xmm0
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_block_aesni,.-AES_GCM_encrypt_block_aesni
.text
.globl AES_GCM_ghash_block_aesni
.type AES_GCM_ghash_block_aesni,@function
.align 16
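# AES_GCM_ghash_block_aesni: one GHASH step, X = (X ^ block) * H, using
# a Karatsuba pclmulqdq multiply (three carry-less products) and the
# shift/xor reduction modulo x^128 + x^7 + x^2 + x + 1.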
AES_GCM_ghash_block_aesni:
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
movdqa (%eax), %xmm4
movdqa (%ecx), %xmm5
movdqu (%edx), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm6
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm6
pxor %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm4, %xmm1
psrld $31, %xmm0
psrld $31, %xmm1
pslld $0x01, %xmm6
pslld $0x01, %xmm4
movdqa %xmm0, %xmm2
pslldq $4, %xmm0
psrldq $12, %xmm2
pslldq $4, %xmm1
por %xmm2, %xmm4
por %xmm0, %xmm6
por %xmm1, %xmm4
movdqa %xmm6, %xmm0
movdqa %xmm6, %xmm1
movdqa %xmm6, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm6
movdqa %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm6, %xmm2
pxor %xmm2, %xmm4
movdqa %xmm4, (%eax)
ret
.size AES_GCM_ghash_block_aesni,.-AES_GCM_ghash_block_aesni
.text
.globl AES_GCM_encrypt_update_aesni
.type AES_GCM_encrypt_update_aesni,@function
.align 16
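# AES_GCM_encrypt_update_aesni: bulk CTR encryption. 64-byte chunks run
# four counter blocks at a time, and GHASH of each previous 64 bytes of
# ciphertext is aggregated with precomputed H^1..H^4; the remainder is
# handled 16 bytes at a time.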
AES_GCM_encrypt_update_aesni:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x60, %esp
movl 144(%esp), %esi
movdqa (%esi), %xmm4
movdqu %xmm4, 64(%esp)
movl 136(%esp), %esi
movl 140(%esp), %ebp
movdqa (%esi), %xmm6
movdqa (%ebp), %xmm5
movdqu %xmm6, 80(%esp)
movl 116(%esp), %ebp
movl 124(%esp), %edi
movl 128(%esp), %esi
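# Multiply H by x: shift left one bit across the 128-bit value, then
# conditionally xor the reduction polynomial (sign mask built with
# pshufd/psrad, anded with L_aes_gcm_mod2_128).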
movdqa %xmm5, %xmm1
movdqa %xmm5, %xmm0
psrlq $63, %xmm1
psllq $0x01, %xmm0
pslldq $8, %xmm1
por %xmm1, %xmm0
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128, %xmm5
pxor %xmm0, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 132(%esp)
movl 132(%esp), %eax
jl L_AES_GCM_encrypt_update_aesni_done_64
andl $0xffffffc0, %eax
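# Precompute H^1..H^4 into (%esp)..48(%esp) so four GHASH multiplies per
# 64-byte chunk can be summed before a single reduction.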
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%esp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm4
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm4
movdqu %xmm4, 16(%esp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm4, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm7
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm7
movdqu %xmm7, 32(%esp)
# H ^ 4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm4, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm7
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm7
movdqu %xmm7, 48(%esp)
# First 64 bytes of input
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm0
movdqa L_aes_gcm_bswap_epi64, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pshufb %xmm7, %xmm0
paddd L_aes_gcm_one, %xmm1
pshufb %xmm7, %xmm1
paddd L_aes_gcm_two, %xmm2
pshufb %xmm7, %xmm2
paddd L_aes_gcm_three, %xmm3
pshufb %xmm7, %xmm3
movdqu 64(%esp), %xmm7
paddd L_aes_gcm_four, %xmm7
movdqu %xmm7, 64(%esp)
movdqa (%ebp), %xmm7
pxor %xmm7, %xmm0
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm7, %xmm3
movdqa 16(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 32(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 48(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 64(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 80(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 96(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 112(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 128(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 144(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $11, 120(%esp)
movdqa 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_aesni_enc_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 176(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $13, 120(%esp)
movdqa 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_aesni_enc_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 208(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_aesni_enc_done:
aesenclast %xmm7, %xmm0
aesenclast %xmm7, %xmm1
movdqu (%esi), %xmm4
movdqu 16(%esi), %xmm5
pxor %xmm4, %xmm0
pxor %xmm5, %xmm1
movdqu %xmm0, (%edi)
movdqu %xmm1, 16(%edi)
aesenclast %xmm7, %xmm2
aesenclast %xmm7, %xmm3
movdqu 32(%esi), %xmm4
movdqu 48(%esi), %xmm5
pxor %xmm4, %xmm2
pxor %xmm5, %xmm3
movdqu %xmm2, 32(%edi)
movdqu %xmm3, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
jle L_AES_GCM_encrypt_update_aesni_end_64
# More 64 bytes of input
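# Each iteration encrypts the next 64 bytes while GHASHing the previous
# 64 bytes of ciphertext (-64(%edx)..-16(%edx)) against H^4..H^1.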
L_AES_GCM_encrypt_update_aesni_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm0
movdqa L_aes_gcm_bswap_epi64, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pshufb %xmm7, %xmm0
paddd L_aes_gcm_one, %xmm1
pshufb %xmm7, %xmm1
paddd L_aes_gcm_two, %xmm2
pshufb %xmm7, %xmm2
paddd L_aes_gcm_three, %xmm3
pshufb %xmm7, %xmm3
movdqu 64(%esp), %xmm7
paddd L_aes_gcm_four, %xmm7
movdqu %xmm7, 64(%esp)
movdqa (%ebp), %xmm7
pxor %xmm7, %xmm0
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm7, %xmm3
movdqa 16(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 32(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 48(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 64(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 80(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 96(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 112(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 128(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 144(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $11, 120(%esp)
movdqa 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_aesni_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 176(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $13, 120(%esp)
movdqa 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_aesni_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 208(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_aesni_aesenc_64_ghash_avx_done:
aesenclast %xmm7, %xmm0
aesenclast %xmm7, %xmm1
movdqu (%ecx), %xmm4
movdqu 16(%ecx), %xmm5
pxor %xmm4, %xmm0
pxor %xmm5, %xmm1
movdqu %xmm0, (%edx)
movdqu %xmm1, 16(%edx)
aesenclast %xmm7, %xmm2
aesenclast %xmm7, %xmm3
movdqu 32(%ecx), %xmm4
movdqu 48(%ecx), %xmm5
pxor %xmm4, %xmm2
pxor %xmm5, %xmm3
movdqu %xmm2, 32(%edx)
movdqu %xmm3, 48(%edx)
# ghash encrypted counter
movdqu 80(%esp), %xmm2
movdqu 48(%esp), %xmm7
movdqu -64(%edx), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
pclmulqdq $0x00, %xmm5, %xmm1
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 32(%esp), %xmm7
movdqu -48(%edx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%esp), %xmm7
movdqu -32(%edx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%esp), %xmm7
movdqu -16(%edx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
movdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_aesni_ghash_64
L_AES_GCM_encrypt_update_aesni_end_64:
movdqu 80(%esp), %xmm6
# Block 1
movdqa L_aes_gcm_bswap_mask, %xmm0
movdqu (%edx), %xmm5
pshufb %xmm0, %xmm5
movdqu 48(%esp), %xmm7
pxor %xmm6, %xmm5
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm0, %xmm4
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
# Block 2
movdqa L_aes_gcm_bswap_mask, %xmm0
movdqu 16(%edx), %xmm5
pshufb %xmm0, %xmm5
movdqu 32(%esp), %xmm7
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
# Block 3
movdqa L_aes_gcm_bswap_mask, %xmm0
movdqu 32(%edx), %xmm5
pshufb %xmm0, %xmm5
movdqu 16(%esp), %xmm7
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
# Block 4
movdqa L_aes_gcm_bswap_mask, %xmm0
movdqu 48(%edx), %xmm5
pshufb %xmm0, %xmm5
movdqu (%esp), %xmm7
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm7, %xmm2
movdqa %xmm7, %xmm3
movdqa %xmm7, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm7, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
pxor %xmm0, %xmm4
pxor %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm4
pxor %xmm1, %xmm6
movdqa %xmm4, %xmm0
movdqa %xmm4, %xmm1
movdqa %xmm4, %xmm2
pslld $31, %xmm0
pslld $30, %xmm1
pslld $25, %xmm2
pxor %xmm1, %xmm0
pxor %xmm2, %xmm0
movdqa %xmm0, %xmm1
psrldq $4, %xmm1
pslldq $12, %xmm0
pxor %xmm0, %xmm4
movdqa %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
psrld $0x01, %xmm2
psrld $2, %xmm3
psrld $7, %xmm0
pxor %xmm3, %xmm2
pxor %xmm0, %xmm2
pxor %xmm1, %xmm2
pxor %xmm4, %xmm2
pxor %xmm2, %xmm6
movdqu (%esp), %xmm5
L_AES_GCM_encrypt_update_aesni_done_64:
movl 132(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_update_aesni_done_enc
movl 132(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_aesni_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu 64(%esp), %xmm0
movdqa %xmm0, %xmm1
pshufb L_aes_gcm_bswap_epi64, %xmm0
paddd L_aes_gcm_one, %xmm1
pxor (%ebp), %xmm0
movdqu %xmm1, 64(%esp)
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
aesenc 96(%ebp), %xmm0
aesenc 112(%ebp), %xmm0
aesenc 128(%ebp), %xmm0
aesenc 144(%ebp), %xmm0
cmpl $11, 120(%esp)
movdqa 160(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 120(%esp)
movdqa 192(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last
aesenc %xmm1, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm1
L_AES_GCM_encrypt_update_aesni_aesenc_block_aesenc_avx_last:
aesenclast %xmm1, %xmm0
movdqu (%ecx), %xmm1
pxor %xmm1, %xmm0
movdqu %xmm0, (%edx)
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm6
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_aesni_last_block_ghash
L_AES_GCM_encrypt_update_aesni_last_block_start:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu 64(%esp), %xmm0
movdqa %xmm0, %xmm1
pshufb L_aes_gcm_bswap_epi64, %xmm0
paddd L_aes_gcm_one, %xmm1
pxor (%ebp), %xmm0
movdqu %xmm1, 64(%esp)
movdqu %xmm6, %xmm4
pclmulqdq $16, %xmm5, %xmm4
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
movdqu %xmm6, %xmm7
pclmulqdq $0x01, %xmm5, %xmm7
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
movdqu %xmm6, %xmm1
pclmulqdq $0x11, %xmm5, %xmm1
aesenc 96(%ebp), %xmm0
pxor %xmm7, %xmm4
movdqa %xmm4, %xmm2
psrldq $8, %xmm4
pslldq $8, %xmm2
aesenc 112(%ebp), %xmm0
movdqu %xmm6, %xmm7
pclmulqdq $0x00, %xmm5, %xmm7
pxor %xmm7, %xmm2
pxor %xmm4, %xmm1
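# Reduce the 256-bit product with two pclmulqdq folds by the constant in
# L_aes_gcm_mod2_128 rather than the shift/xor sequence used elsewhere.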
movdqa L_aes_gcm_mod2_128, %xmm3
movdqa %xmm2, %xmm7
pclmulqdq $16, %xmm3, %xmm7
aesenc 128(%ebp), %xmm0
pshufd $0x4e, %xmm2, %xmm4
pxor %xmm7, %xmm4
movdqa %xmm4, %xmm7
pclmulqdq $16, %xmm3, %xmm7
aesenc 144(%ebp), %xmm0
pshufd $0x4e, %xmm4, %xmm6
pxor %xmm7, %xmm6
pxor %xmm1, %xmm6
cmpl $11, 120(%esp)
movdqa 160(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm1, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 120(%esp)
movdqa 192(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm1, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm1
L_AES_GCM_encrypt_update_aesni_aesenc_gfmul_last:
aesenclast %xmm1, %xmm0
movdqu (%ecx), %xmm1
pxor %xmm1, %xmm0
movdqu %xmm0, (%edx)
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm0, %xmm6
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_aesni_last_block_start
L_AES_GCM_encrypt_update_aesni_last_block_ghash:
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm6
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm6
L_AES_GCM_encrypt_update_aesni_last_block_done:
L_AES_GCM_encrypt_update_aesni_done_enc:
movl 136(%esp), %esi
movl 144(%esp), %edi
movdqu 64(%esp), %xmm4
movdqa %xmm6, (%esi)
movdqu %xmm4, (%edi)
addl $0x60, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_update_aesni,.-AES_GCM_encrypt_update_aesni
.text
.globl AES_GCM_encrypt_final_aesni
.type AES_GCM_encrypt_final_aesni,@function
.align 16
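# AES_GCM_encrypt_final_aesni: finish the tag. GHASH the 128-bit
# (len(AAD) || len(C)) bit-length block, byte-swap, XOR with the
# encrypted initial counter, and store the requested tag length (a byte
# loop for partial tags, a single store for 16 bytes). Argument layout
# inferred from the stack offsets below.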
AES_GCM_encrypt_final_aesni:
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 32(%esp), %ebp
movl 52(%esp), %esi
movl 56(%esp), %edi
movdqa (%ebp), %xmm4
movdqa (%esi), %xmm5
movdqa (%edi), %xmm6
movdqa %xmm5, %xmm1
movdqa %xmm5, %xmm0
psrlq $63, %xmm1
psllq $0x01, %xmm0
pslldq $8, %xmm1
por %xmm1, %xmm0
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128, %xmm5
pxor %xmm0, %xmm5
movl 44(%esp), %edx
movl 48(%esp), %ecx
shll $3, %edx
shll $3, %ecx
pinsrd $0x00, %edx, %xmm0
pinsrd $2, %ecx, %xmm0
movl 44(%esp), %edx
movl 48(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
pinsrd $0x01, %edx, %xmm0
pinsrd $3, %ecx, %xmm0
pxor %xmm0, %xmm4
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm4, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm4
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm4
movdqu %xmm6, %xmm0
pxor %xmm4, %xmm0
movl 36(%esp), %edi
cmpl $16, 40(%esp)
je L_AES_GCM_encrypt_final_aesni_store_tag_16
xorl %ecx, %ecx
movdqu %xmm0, (%esp)
L_AES_GCM_encrypt_final_aesni_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl 40(%esp), %ecx
jne L_AES_GCM_encrypt_final_aesni_store_tag_loop
jmp L_AES_GCM_encrypt_final_aesni_store_tag_done
L_AES_GCM_encrypt_final_aesni_store_tag_16:
movdqu %xmm0, (%edi)
L_AES_GCM_encrypt_final_aesni_store_tag_done:
addl $16, %esp
popl %ebp
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_final_aesni,.-AES_GCM_encrypt_final_aesni
.text
.globl AES_GCM_decrypt_update_aesni
.type AES_GCM_decrypt_update_aesni,@function
.align 16
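# AES_GCM_decrypt_update_aesni: mirror of the encrypt path with GHASH
# taken over the input ciphertext instead of the output.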
AES_GCM_decrypt_update_aesni:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xa0, %esp
movl 208(%esp), %esi
movdqa (%esi), %xmm4
movdqu %xmm4, 64(%esp)
movl 200(%esp), %esi
movl 204(%esp), %ebp
movdqa (%esi), %xmm6
movdqa (%ebp), %xmm5
movdqu %xmm6, 80(%esp)
movl 180(%esp), %ebp
movl 188(%esp), %edi
movl 192(%esp), %esi
movdqa %xmm5, %xmm1
movdqa %xmm5, %xmm0
psrlq $63, %xmm1
psllq $0x01, %xmm0
pslldq $8, %xmm1
por %xmm1, %xmm0
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128, %xmm5
pxor %xmm0, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 196(%esp)
movl 196(%esp), %eax
jl L_AES_GCM_decrypt_update_aesni_done_64
andl $0xffffffc0, %eax
movdqa %xmm6, %xmm2
# H ^ 1
movdqu %xmm5, (%esp)
# H ^ 2
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm5, %xmm2
movdqa %xmm5, %xmm3
movdqa %xmm5, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm5, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm4
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm4
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm4
movdqu %xmm4, 16(%esp)
# H ^ 3
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm4, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm7
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm7
movdqu %xmm7, 32(%esp)
# H ^ 4
pshufd $0x4e, %xmm4, %xmm1
pshufd $0x4e, %xmm4, %xmm2
movdqa %xmm4, %xmm3
movdqa %xmm4, %xmm0
pclmulqdq $0x11, %xmm4, %xmm3
pclmulqdq $0x00, %xmm4, %xmm0
pxor %xmm4, %xmm1
pxor %xmm4, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm7
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm7
movdqu %xmm7, 48(%esp)
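# Pick the 64-byte loop: the in-place variant (out == in) copies each
# ciphertext block to the stack before it is overwritten, so GHASH can
# still read it afterwards.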
cmpl %esi, %edi
jne L_AES_GCM_decrypt_update_aesni_ghash_64
L_AES_GCM_decrypt_update_aesni_ghash_64_inplace:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm0
movdqa L_aes_gcm_bswap_epi64, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pshufb %xmm7, %xmm0
paddd L_aes_gcm_one, %xmm1
pshufb %xmm7, %xmm1
paddd L_aes_gcm_two, %xmm2
pshufb %xmm7, %xmm2
paddd L_aes_gcm_three, %xmm3
pshufb %xmm7, %xmm3
movdqu 64(%esp), %xmm7
paddd L_aes_gcm_four, %xmm7
movdqu %xmm7, 64(%esp)
movdqa (%ebp), %xmm7
pxor %xmm7, %xmm0
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm7, %xmm3
movdqa 16(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 32(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 48(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 64(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 80(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 96(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 112(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 128(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 144(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $11, 184(%esp)
movdqa 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_aesni_inplace_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 176(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $13, 184(%esp)
movdqa 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_aesni_inplace_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 208(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_aesni_inplace_aesenc_64_ghash_avx_done:
aesenclast %xmm7, %xmm0
aesenclast %xmm7, %xmm1
movdqu (%ecx), %xmm4
movdqu 16(%ecx), %xmm5
pxor %xmm4, %xmm0
pxor %xmm5, %xmm1
movdqu %xmm4, 96(%esp)
movdqu %xmm5, 112(%esp)
movdqu %xmm0, (%edx)
movdqu %xmm1, 16(%edx)
aesenclast %xmm7, %xmm2
aesenclast %xmm7, %xmm3
movdqu 32(%ecx), %xmm4
movdqu 48(%ecx), %xmm5
pxor %xmm4, %xmm2
pxor %xmm5, %xmm3
movdqu %xmm4, 128(%esp)
movdqu %xmm5, 144(%esp)
movdqu %xmm2, 32(%edx)
movdqu %xmm3, 48(%edx)
# ghash encrypted counter
movdqu 80(%esp), %xmm2
movdqu 48(%esp), %xmm7
movdqu 96(%esp), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
pclmulqdq $0x00, %xmm5, %xmm1
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 32(%esp), %xmm7
movdqu 112(%esp), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%esp), %xmm7
movdqu 128(%esp), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%esp), %xmm7
movdqu 144(%esp), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
movdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_aesni_ghash_64_inplace
jmp L_AES_GCM_decrypt_update_aesni_ghash_64_done
L_AES_GCM_decrypt_update_aesni_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# Encrypt 64 bytes of counter
movdqu 64(%esp), %xmm0
movdqa L_aes_gcm_bswap_epi64, %xmm7
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pshufb %xmm7, %xmm0
paddd L_aes_gcm_one, %xmm1
pshufb %xmm7, %xmm1
paddd L_aes_gcm_two, %xmm2
pshufb %xmm7, %xmm2
paddd L_aes_gcm_three, %xmm3
pshufb %xmm7, %xmm3
movdqu 64(%esp), %xmm7
paddd L_aes_gcm_four, %xmm7
movdqu %xmm7, 64(%esp)
movdqa (%ebp), %xmm7
pxor %xmm7, %xmm0
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm7, %xmm3
movdqa 16(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 32(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 48(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 64(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 80(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 96(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 112(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 128(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 144(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $11, 184(%esp)
movdqa 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_aesni_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 176(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
cmpl $13, 184(%esp)
movdqa 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_aesni_aesenc_64_ghash_avx_done
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 208(%ebp), %xmm7
aesenc %xmm7, %xmm0
aesenc %xmm7, %xmm1
aesenc %xmm7, %xmm2
aesenc %xmm7, %xmm3
movdqa 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_aesni_aesenc_64_ghash_avx_done:
aesenclast %xmm7, %xmm0
aesenclast %xmm7, %xmm1
movdqu (%ecx), %xmm4
movdqu 16(%ecx), %xmm5
pxor %xmm4, %xmm0
pxor %xmm5, %xmm1
movdqu %xmm4, (%ecx)
movdqu %xmm5, 16(%ecx)
movdqu %xmm0, (%edx)
movdqu %xmm1, 16(%edx)
aesenclast %xmm7, %xmm2
aesenclast %xmm7, %xmm3
movdqu 32(%ecx), %xmm4
movdqu 48(%ecx), %xmm5
pxor %xmm4, %xmm2
pxor %xmm5, %xmm3
movdqu %xmm4, 32(%ecx)
movdqu %xmm5, 48(%ecx)
movdqu %xmm2, 32(%edx)
movdqu %xmm3, 48(%edx)
# ghash encrypted counter
movdqu 80(%esp), %xmm2
movdqu 48(%esp), %xmm7
movdqu (%ecx), %xmm0
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm2, %xmm0
pshufd $0x4e, %xmm7, %xmm1
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm7, %xmm1
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm3
pclmulqdq $0x11, %xmm7, %xmm3
movdqa %xmm0, %xmm2
pclmulqdq $0x00, %xmm7, %xmm2
pclmulqdq $0x00, %xmm5, %xmm1
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqu 32(%esp), %xmm7
movdqu 16(%ecx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu 16(%esp), %xmm7
movdqu 32(%ecx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqu (%esp), %xmm7
movdqu 48(%ecx), %xmm0
pshufd $0x4e, %xmm7, %xmm4
pshufb L_aes_gcm_bswap_mask, %xmm0
pxor %xmm7, %xmm4
pshufd $0x4e, %xmm0, %xmm5
pxor %xmm0, %xmm5
movdqa %xmm0, %xmm6
pclmulqdq $0x11, %xmm7, %xmm6
pclmulqdq $0x00, %xmm0, %xmm7
pclmulqdq $0x00, %xmm5, %xmm4
pxor %xmm7, %xmm1
pxor %xmm7, %xmm2
pxor %xmm6, %xmm1
pxor %xmm6, %xmm3
pxor %xmm4, %xmm1
movdqa %xmm1, %xmm5
psrldq $8, %xmm1
pslldq $8, %xmm5
pxor %xmm5, %xmm2
pxor %xmm1, %xmm3
movdqa %xmm2, %xmm7
movdqa %xmm2, %xmm4
movdqa %xmm2, %xmm5
pslld $31, %xmm7
pslld $30, %xmm4
pslld $25, %xmm5
pxor %xmm4, %xmm7
pxor %xmm5, %xmm7
movdqa %xmm7, %xmm4
pslldq $12, %xmm7
psrldq $4, %xmm4
pxor %xmm7, %xmm2
movdqa %xmm2, %xmm5
movdqa %xmm2, %xmm1
movdqa %xmm2, %xmm0
psrld $0x01, %xmm5
psrld $2, %xmm1
psrld $7, %xmm0
pxor %xmm1, %xmm5
pxor %xmm0, %xmm5
pxor %xmm4, %xmm5
pxor %xmm5, %xmm2
pxor %xmm3, %xmm2
movdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_aesni_ghash_64
L_AES_GCM_decrypt_update_aesni_ghash_64_done:
movdqa %xmm2, %xmm6
movdqu (%esp), %xmm5
L_AES_GCM_decrypt_update_aesni_done_64:
movl 196(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_update_aesni_done_dec
movl 196(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_update_aesni_last_block_done
L_AES_GCM_decrypt_update_aesni_last_block_start:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
movdqu (%ecx), %xmm1
pshufb L_aes_gcm_bswap_mask, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, (%esp)
movdqu 64(%esp), %xmm0
movdqa %xmm0, %xmm1
pshufb L_aes_gcm_bswap_epi64, %xmm0
paddd L_aes_gcm_one, %xmm1
pxor (%ebp), %xmm0
movdqu %xmm1, 64(%esp)
movdqu (%esp), %xmm4
pclmulqdq $16, %xmm5, %xmm4
aesenc 16(%ebp), %xmm0
aesenc 32(%ebp), %xmm0
movdqu (%esp), %xmm7
pclmulqdq $0x01, %xmm5, %xmm7
aesenc 48(%ebp), %xmm0
aesenc 64(%ebp), %xmm0
aesenc 80(%ebp), %xmm0
movdqu (%esp), %xmm1
pclmulqdq $0x11, %xmm5, %xmm1
aesenc 96(%ebp), %xmm0
pxor %xmm7, %xmm4
movdqa %xmm4, %xmm2
psrldq $8, %xmm4
pslldq $8, %xmm2
aesenc 112(%ebp), %xmm0
movdqu (%esp), %xmm7
pclmulqdq $0x00, %xmm5, %xmm7
pxor %xmm7, %xmm2
pxor %xmm4, %xmm1
movdqa L_aes_gcm_mod2_128, %xmm3
movdqa %xmm2, %xmm7
pclmulqdq $16, %xmm3, %xmm7
aesenc 128(%ebp), %xmm0
pshufd $0x4e, %xmm2, %xmm4
pxor %xmm7, %xmm4
movdqa %xmm4, %xmm7
pclmulqdq $16, %xmm3, %xmm7
aesenc 144(%ebp), %xmm0
pshufd $0x4e, %xmm4, %xmm6
pxor %xmm7, %xmm6
pxor %xmm1, %xmm6
cmpl $11, 184(%esp)
movdqa 160(%ebp), %xmm1
jl L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm1, %xmm0
aesenc 176(%ebp), %xmm0
cmpl $13, 184(%esp)
movdqa 192(%ebp), %xmm1
jl L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last
aesenc %xmm1, %xmm0
aesenc 208(%ebp), %xmm0
movdqa 224(%ebp), %xmm1
L_AES_GCM_decrypt_update_aesni_aesenc_gfmul_last:
aesenclast %xmm1, %xmm0
movdqu (%ecx), %xmm1
pxor %xmm1, %xmm0
movdqu %xmm0, (%edx)
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_aesni_last_block_start
L_AES_GCM_decrypt_update_aesni_last_block_done:
L_AES_GCM_decrypt_update_aesni_done_dec:
movl 200(%esp), %esi
movl 208(%esp), %edi
movdqu 64(%esp), %xmm4
movdqa %xmm6, (%esi)
movdqu %xmm4, (%edi)
addl $0xa0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_update_aesni,.-AES_GCM_decrypt_update_aesni
.text
.globl AES_GCM_decrypt_final_aesni
.type AES_GCM_decrypt_final_aesni,@function
.align 16
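# AES_GCM_decrypt_final_aesni: recompute the expected tag and compare it
# with the caller's tag in constant time; *res gets 1 on match, else 0.
# Argument layout inferred from the stack offsets below.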
AES_GCM_decrypt_final_aesni:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 36(%esp), %ebp
movl 56(%esp), %esi
movl 60(%esp), %edi
movdqa (%ebp), %xmm6
movdqa (%esi), %xmm5
movdqa (%edi), %xmm7
movdqa %xmm5, %xmm1
movdqa %xmm5, %xmm0
psrlq $63, %xmm1
psllq $0x01, %xmm0
pslldq $8, %xmm1
por %xmm1, %xmm0
pshufd $0xff, %xmm5, %xmm5
psrad $31, %xmm5
pand L_aes_gcm_mod2_128, %xmm5
pxor %xmm0, %xmm5
movl 48(%esp), %edx
movl 52(%esp), %ecx
shll $3, %edx
shll $3, %ecx
pinsrd $0x00, %edx, %xmm0
pinsrd $2, %ecx, %xmm0
movl 48(%esp), %edx
movl 52(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
pinsrd $0x01, %edx, %xmm0
pinsrd $3, %ecx, %xmm0
pxor %xmm0, %xmm6
pshufd $0x4e, %xmm5, %xmm1
pshufd $0x4e, %xmm6, %xmm2
movdqa %xmm6, %xmm3
movdqa %xmm6, %xmm0
pclmulqdq $0x11, %xmm5, %xmm3
pclmulqdq $0x00, %xmm5, %xmm0
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
pclmulqdq $0x00, %xmm2, %xmm1
pxor %xmm0, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm2
movdqa %xmm3, %xmm6
pslldq $8, %xmm2
psrldq $8, %xmm1
pxor %xmm2, %xmm0
pxor %xmm1, %xmm6
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
pslld $31, %xmm1
pslld $30, %xmm2
pslld $25, %xmm3
pxor %xmm2, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm3
psrldq $4, %xmm3
pslldq $12, %xmm1
pxor %xmm1, %xmm0
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
psrld $0x01, %xmm1
psrld $2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm0, %xmm1
psrld $7, %xmm0
pxor %xmm3, %xmm1
pxor %xmm0, %xmm1
pxor %xmm1, %xmm6
pshufb L_aes_gcm_bswap_mask, %xmm6
movdqu %xmm7, %xmm0
pxor %xmm6, %xmm0
movl 40(%esp), %esi
movl 64(%esp), %edi
cmpl $16, 44(%esp)
je L_AES_GCM_decrypt_final_aesni_cmp_tag_16
subl $16, %esp
xorl %ecx, %ecx
xorl %ebx, %ebx
movdqu %xmm0, (%esp)
L_AES_GCM_decrypt_final_aesni_cmp_tag_loop:
movzbl (%esp,%ecx,1), %eax
xorb (%esi,%ecx,1), %al
orb %al, %bl
incl %ecx
cmpl 44(%esp), %ecx
jne L_AES_GCM_decrypt_final_aesni_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addl $16, %esp
xorl %ecx, %ecx
jmp L_AES_GCM_decrypt_final_aesni_cmp_tag_done
L_AES_GCM_decrypt_final_aesni_cmp_tag_16:
movdqu (%esi), %xmm1
pcmpeqb %xmm1, %xmm0
pmovmskb %xmm0, %edx
# if %edx == 0xffff all 16 tag bytes matched: return 1, else return 0
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_final_aesni_cmp_tag_done:
movl %ebx, (%edi)
addl $16, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_final_aesni,.-AES_GCM_decrypt_final_aesni
#endif /* WOLFSSL_AESGCM_STREAM */
#ifdef HAVE_INTEL_AVX1
.text
.globl AES_GCM_encrypt_avx1
.type AES_GCM_encrypt_avx1,@function
.align 16
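# AES_GCM_encrypt_avx1: one-shot AES-GCM encrypt using AVX1 three-operand
# forms (vaesenc/vpclmulqdq); the structure parallels AES_GCM_encrypt_aesni.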
AES_GCM_encrypt_avx1:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x70, %esp
movl 144(%esp), %esi
movl 168(%esp), %ebp
movl 160(%esp), %edx
vpxor %xmm0, %xmm0, %xmm0
vpxor %xmm2, %xmm2, %xmm2
cmpl $12, %edx
jne L_AES_GCM_encrypt_avx1_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
vpinsrd $0x00, (%esi), %xmm0, %xmm0
vpinsrd $0x01, 4(%esi), %xmm0, %xmm0
vpinsrd $2, 8(%esi), %xmm0, %xmm0
vpinsrd $3, %ecx, %xmm0, %xmm0
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqa (%ebp), %xmm1
vpxor %xmm1, %xmm0, %xmm5
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_calc_iv_12_last
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_calc_iv_12_last
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_avx1_calc_iv_12_last:
vaesenclast %xmm3, %xmm1, %xmm1
vaesenclast %xmm3, %xmm5, %xmm5
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm1, %xmm1
vmovdqu %xmm5, 80(%esp)
jmp L_AES_GCM_encrypt_avx1_iv_done
L_AES_GCM_encrypt_avx1_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqa (%ebp), %xmm1
vaesenc 16(%ebp), %xmm1, %xmm1
vaesenc 32(%ebp), %xmm1, %xmm1
vaesenc 48(%ebp), %xmm1, %xmm1
vaesenc 64(%ebp), %xmm1, %xmm1
vaesenc 80(%ebp), %xmm1, %xmm1
vaesenc 96(%ebp), %xmm1, %xmm1
vaesenc 112(%ebp), %xmm1, %xmm1
vaesenc 128(%ebp), %xmm1, %xmm1
vaesenc 144(%ebp), %xmm1, %xmm1
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm5, %xmm1, %xmm1
vaesenc 176(%ebp), %xmm1, %xmm1
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm5, %xmm1, %xmm1
vaesenc 208(%ebp), %xmm1, %xmm1
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_avx1_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm5, %xmm1, %xmm1
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm1, %xmm1
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_encrypt_avx1_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx1_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx1_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_iv_16_loop
movl 160(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx1_calc_iv_done
L_AES_GCM_encrypt_avx1_calc_iv_lt16:
subl $16, %esp
vpxor %xmm4, %xmm4, %xmm4
xorl %ebx, %ebx
vmovdqu %xmm4, (%esp)
L_AES_GCM_encrypt_avx1_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_iv_loop
vmovdqu (%esp), %xmm4
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
L_AES_GCM_encrypt_avx1_calc_iv_done:
# GHASH in the IV bit length to finish J0; the counter encryption follows
vpxor %xmm4, %xmm4, %xmm4
shll $3, %edx
vpinsrd $0x00, %edx, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
# Encrypt counter
vmovdqa (%ebp), %xmm4
vpxor %xmm0, %xmm4, %xmm4
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vaesenc 96(%ebp), %xmm4, %xmm4
vaesenc 112(%ebp), %xmm4, %xmm4
vaesenc 128(%ebp), %xmm4, %xmm4
vaesenc 144(%ebp), %xmm4, %xmm4
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_avx1_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm5, %xmm4, %xmm4
vmovdqu %xmm4, 80(%esp)
L_AES_GCM_encrypt_avx1_iv_done:
movl 140(%esp), %esi
# Additional authentication data
movl 156(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_avx1_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx1_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx1_calc_aad_16_loop:
vmovdqu (%esi,%ecx,1), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_avx
vpshufd $0x4e, %xmm2, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm4
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm2, %xmm2
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm2, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm2, %xmm2
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm2, %xmm2
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm2, %xmm2
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm2, %xmm2
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_aad_16_loop
movl 156(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx1_calc_aad_done
L_AES_GCM_encrypt_avx1_calc_aad_lt16:
subl $16, %esp
vpxor %xmm4, %xmm4, %xmm4
xorl %ebx, %ebx
vmovdqu %xmm4, (%esp)
L_AES_GCM_encrypt_avx1_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx1_calc_aad_loop
vmovdqu (%esp), %xmm4
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_avx
vpshufd $0x4e, %xmm2, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm4
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm2, %xmm2
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm2, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm2, %xmm2
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm2, %xmm2
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm2, %xmm2
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm2, %xmm2
L_AES_GCM_encrypt_avx1_calc_aad_done:
vmovdqu %xmm2, 96(%esp)
movl 132(%esp), %esi
movl 136(%esp), %edi
# Calculate counter and H
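# H is shifted left by one bit across both 64-bit lanes
# (vpsrlq/vpsllq/vpslldq) and, when bit 127 was set, reduced by XORing
# L_aes_gcm_avx1_mod2_128 (selected by the vpshufd/vpsrad all-ones
# mask). The IV counter is also byte-swapped and incremented by one.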
vpsrlq $63, %xmm1, %xmm5
vpsllq $0x01, %xmm1, %xmm4
vpslldq $8, %xmm5, %xmm5
vpor %xmm5, %xmm4, %xmm4
vpshufd $0xff, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm1
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm0, %xmm0
vpand L_aes_gcm_avx1_mod2_128, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 64(%esp)
xorl %ebx, %ebx
cmpl $0x40, 152(%esp)
movl 152(%esp), %eax
jl L_AES_GCM_encrypt_avx1_done_64
andl $0xffffffc0, %eax
vmovdqa %xmm2, %xmm6
# H ^ 1
vmovdqu %xmm1, (%esp)
# H ^ 2
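# Squaring in GF(2^128): cross terms of (a + b)^2 vanish in
# characteristic 2, so H^2 needs only the two vpclmulqdq self-products
# plus the usual reduction, with no Karatsuba middle term.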
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm0
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm0, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm0, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm3
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm3, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm3, 48(%esp)
# First 64 bytes of input
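# Four counter blocks (ctr, ctr+1, ctr+2, ctr+3, byte-swapped with
# bswap_epi64) are pushed through the AES rounds together to keep the
# vaesenc pipeline busy; the counter saved at 64(%esp) advances by four.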
vmovdqu 64(%esp), %xmm4
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm3
vpaddd L_aes_gcm_avx1_one, %xmm4, %xmm5
vpshufb %xmm3, %xmm5, %xmm5
vpaddd L_aes_gcm_avx1_two, %xmm4, %xmm6
vpshufb %xmm3, %xmm6, %xmm6
vpaddd L_aes_gcm_avx1_three, %xmm4, %xmm7
vpshufb %xmm3, %xmm7, %xmm7
vpshufb %xmm3, %xmm4, %xmm4
vmovdqu 64(%esp), %xmm3
vpaddd L_aes_gcm_avx1_four, %xmm3, %xmm3
vmovdqu %xmm3, 64(%esp)
vmovdqa (%ebp), %xmm3
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_avx1_aesenc_64_enc_done:
vaesenclast %xmm3, %xmm4, %xmm4
vaesenclast %xmm3, %xmm5, %xmm5
vmovdqu (%esi), %xmm0
vmovdqu 16(%esi), %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vmovdqu %xmm4, (%edi)
vmovdqu %xmm5, 16(%edi)
vaesenclast %xmm3, %xmm6, %xmm6
vaesenclast %xmm3, %xmm7, %xmm7
vmovdqu 32(%esi), %xmm0
vmovdqu 48(%esi), %xmm1
vpxor %xmm0, %xmm6, %xmm6
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm6, 32(%edi)
vmovdqu %xmm7, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
movl %esi, %ecx
movl %edi, %edx
jle L_AES_GCM_encrypt_avx1_end_64
# More 64 bytes of input
L_AES_GCM_encrypt_avx1_ghash_64:
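# Main loop: encrypt the next 64 bytes in CTR mode while GHASHing the
# previous 64 bytes of ciphertext ("ghash encrypted counter" below),
# overlapping AES latency with the carry-less multiplies.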
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm4
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm3
vpaddd L_aes_gcm_avx1_one, %xmm4, %xmm5
vpshufb %xmm3, %xmm5, %xmm5
vpaddd L_aes_gcm_avx1_two, %xmm4, %xmm6
vpshufb %xmm3, %xmm6, %xmm6
vpaddd L_aes_gcm_avx1_three, %xmm4, %xmm7
vpshufb %xmm3, %xmm7, %xmm7
vpshufb %xmm3, %xmm4, %xmm4
vmovdqu 64(%esp), %xmm3
vpaddd L_aes_gcm_avx1_four, %xmm3, %xmm3
vmovdqu %xmm3, 64(%esp)
vmovdqa (%ebp), %xmm3
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_encrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_encrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm3, %xmm4, %xmm4
vaesenclast %xmm3, %xmm5, %xmm5
vmovdqu (%ecx), %xmm0
vmovdqu 16(%ecx), %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vmovdqu %xmm4, (%edx)
vmovdqu %xmm5, 16(%edx)
vaesenclast %xmm3, %xmm6, %xmm6
vaesenclast %xmm3, %xmm7, %xmm7
vmovdqu 32(%ecx), %xmm0
vmovdqu 48(%ecx), %xmm1
vpxor %xmm0, %xmm6, %xmm6
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm6, 32(%edx)
vmovdqu %xmm7, 48(%edx)
# ghash encrypted counter
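# Aggregated 4-block GHASH: the previous four ciphertext blocks are
# multiplied by H^4, H^3, H^2 and H^1 (the table built at
# (%esp)..48(%esp)) and summed, so the reduction modulo
# x^128 + x^7 + x^2 + x + 1 runs once per 64 bytes instead of per block.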
vmovdqu 96(%esp), %xmm6
vmovdqu 48(%esp), %xmm3
vmovdqu -64(%edx), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vpshufd $0x4e, %xmm3, %xmm5
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm7
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm6
vpclmulqdq $0x00, %xmm1, %xmm5, %xmm5
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqu 32(%esp), %xmm3
vmovdqu -48(%edx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu 16(%esp), %xmm3
vmovdqu -32(%edx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu (%esp), %xmm3
vmovdqu -16(%edx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpslld $31, %xmm6, %xmm3
vpslld $30, %xmm6, %xmm0
vpslld $25, %xmm6, %xmm1
vpxor %xmm0, %xmm3, %xmm3
vpxor %xmm1, %xmm3, %xmm3
vpsrldq $4, %xmm3, %xmm0
vpslldq $12, %xmm3, %xmm3
vpxor %xmm3, %xmm6, %xmm6
vpsrld $0x01, %xmm6, %xmm1
vpsrld $2, %xmm6, %xmm5
vpsrld $7, %xmm6, %xmm4
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vmovdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_avx1_ghash_64
L_AES_GCM_encrypt_avx1_end_64:
vmovdqu 96(%esp), %xmm2
# Block 1
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm4
vmovdqu (%edx), %xmm1
vpshufb %xmm4, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm3
vpxor %xmm2, %xmm1, %xmm1
# ghash_gfmul_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm3, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm0
vmovdqa %xmm7, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm0, %xmm0
vpxor %xmm5, %xmm2, %xmm2
# Block 2
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm4
vmovdqu 16(%edx), %xmm1
vpshufb %xmm4, %xmm1, %xmm1
vmovdqu 32(%esp), %xmm3
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm3, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm7, %xmm2, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm0, %xmm0
vpxor %xmm5, %xmm2, %xmm2
# Block 3
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm4
vmovdqu 32(%edx), %xmm1
vpshufb %xmm4, %xmm1, %xmm1
vmovdqu 16(%esp), %xmm3
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm3, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm7, %xmm2, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm0, %xmm0
vpxor %xmm5, %xmm2, %xmm2
# Block 4
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm4
vmovdqu 48(%edx), %xmm1
vpshufb %xmm4, %xmm1, %xmm1
vmovdqu (%esp), %xmm3
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm3, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm3, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm3, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm7, %xmm2, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm0, %xmm0
vpxor %xmm5, %xmm2, %xmm2
vpslld $31, %xmm0, %xmm4
vpslld $30, %xmm0, %xmm5
vpslld $25, %xmm0, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm6
vpsrld $2, %xmm0, %xmm7
vpsrld $7, %xmm0, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
vpxor %xmm6, %xmm2, %xmm2
vmovdqu (%esp), %xmm1
L_AES_GCM_encrypt_avx1_done_64:
movl 152(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_avx1_done_enc
movl 152(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_avx1_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm5
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm5, %xmm4
vpaddd L_aes_gcm_avx1_one, %xmm5, %xmm5
vmovdqu %xmm5, 64(%esp)
vpxor (%ebp), %xmm4, %xmm4
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vaesenc 96(%ebp), %xmm4, %xmm4
vaesenc 112(%ebp), %xmm4, %xmm4
vaesenc 128(%ebp), %xmm4, %xmm4
vaesenc 144(%ebp), %xmm4, %xmm4
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_avx1_aesenc_block_aesenc_avx_last:
vaesenclast %xmm5, %xmm4, %xmm4
vmovdqu (%ecx), %xmm5
vpxor %xmm5, %xmm4, %xmm4
vmovdqu %xmm4, (%edx)
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_avx1_last_block_ghash
L_AES_GCM_encrypt_avx1_last_block_start:
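# Full-block tail loop: one CTR block is encrypted while the GHASH
# multiply of the running state (xmm7) by H is interleaved between the
# vaesenc steps; the two vpclmulqdq $16 folds against
# L_aes_gcm_avx1_mod2_128 implement the 256- to 128-bit reduction.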
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm5
vmovdqu %xmm2, %xmm7
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm5, %xmm4
vpaddd L_aes_gcm_avx1_one, %xmm5, %xmm5
vmovdqu %xmm5, 64(%esp)
vpxor (%ebp), %xmm4, %xmm4
vpclmulqdq $16, %xmm1, %xmm7, %xmm0
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vpclmulqdq $0x01, %xmm1, %xmm7, %xmm3
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vpclmulqdq $0x11, %xmm1, %xmm7, %xmm5
vaesenc 96(%ebp), %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpslldq $8, %xmm0, %xmm6
vpsrldq $8, %xmm0, %xmm0
vaesenc 112(%ebp), %xmm4, %xmm4
vpclmulqdq $0x00, %xmm1, %xmm7, %xmm3
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm0, %xmm5, %xmm5
vmovdqa L_aes_gcm_avx1_mod2_128, %xmm7
vpclmulqdq $16, %xmm7, %xmm6, %xmm3
vaesenc 128(%ebp), %xmm4, %xmm4
vpshufd $0x4e, %xmm6, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpclmulqdq $16, %xmm7, %xmm0, %xmm3
vaesenc 144(%ebp), %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm2
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm5, %xmm2, %xmm2
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_gfmul_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_gfmul_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_avx1_aesenc_gfmul_last:
vaesenclast %xmm5, %xmm4, %xmm4
vmovdqu (%ecx), %xmm5
vpxor %xmm5, %xmm4, %xmm4
vmovdqu %xmm4, (%edx)
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
addl $16, %ebx
vpxor %xmm4, %xmm2, %xmm2
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_avx1_last_block_start
L_AES_GCM_encrypt_avx1_last_block_ghash:
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm2, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm2
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
L_AES_GCM_encrypt_avx1_last_block_done:
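# Partial final block (length % 16 != 0): the last counter block is
# encrypted, the remaining input bytes are XORed with it one at a time,
# and the stack copy of the ciphertext is zero-padded before being
# folded into GHASH.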
movl 152(%esp), %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_done
vmovdqu 64(%esp), %xmm0
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm0, %xmm0
vpxor (%ebp), %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 172(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm5, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 172(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm5, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_aesenc_avx_last:
vaesenclast %xmm5, %xmm0, %xmm0
subl $16, %esp
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_loop:
movzbl (%esi,%ebx,1), %eax
xorb (%esp,%ecx,1), %al
movb %al, (%edi,%ebx,1)
movb %al, (%esp,%ecx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_loop
xorl %eax, %eax
cmpl $16, %ecx
je L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_finish_enc
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_byte_loop:
movb %al, (%esp,%ecx,1)
incl %ecx
cmpl $16, %ecx
jl L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_byte_loop
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_finish_enc:
vmovdqu (%esp), %xmm0
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm2, %xmm2
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm2, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm2
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
L_AES_GCM_encrypt_avx1_aesenc_last15_enc_avx_done:
L_AES_GCM_encrypt_avx1_done_enc:
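# Final GHASH block is len(AAD) || len(C) in bits: each 32-bit byte
# count is shifted left by 3, with the bits shifted out (shrl $29)
# placed in the adjacent dword to form a 64-bit bit count. One more
# multiply by H and an XOR with the encrypted initial counter block
# (saved at 80(%esp)) yields the authentication tag.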
movl 148(%esp), %edi
movl 164(%esp), %ebx
movl 152(%esp), %edx
movl 156(%esp), %ecx
shll $3, %edx
shll $3, %ecx
vpinsrd $0x00, %edx, %xmm4, %xmm4
vpinsrd $2, %ecx, %xmm4, %xmm4
movl 152(%esp), %edx
movl 156(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
vpinsrd $0x01, %edx, %xmm4, %xmm4
vpinsrd $3, %ecx, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm2, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm2
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm2, %xmm2
vpxor 80(%esp), %xmm2, %xmm4
cmpl $16, %ebx
je L_AES_GCM_encrypt_avx1_store_tag_16
xorl %ecx, %ecx
vmovdqu %xmm4, (%esp)
L_AES_GCM_encrypt_avx1_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl %ebx, %ecx
jne L_AES_GCM_encrypt_avx1_store_tag_loop
jmp L_AES_GCM_encrypt_avx1_store_tag_done
L_AES_GCM_encrypt_avx1_store_tag_16:
vmovdqu %xmm4, (%edi)
L_AES_GCM_encrypt_avx1_store_tag_done:
addl $0x70, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_avx1,.-AES_GCM_encrypt_avx1
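# AES_GCM_decrypt_avx1 mirrors the encrypt path, except that GHASH runs
# over the ciphertext before decryption and the computed tag is compared
# against the caller's tag rather than stored.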
.text
.globl AES_GCM_decrypt_avx1
.type AES_GCM_decrypt_avx1,@function
.align 16
AES_GCM_decrypt_avx1:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xb0, %esp
movl 208(%esp), %esi
movl 232(%esp), %ebp
movl 224(%esp), %edx
vpxor %xmm0, %xmm0, %xmm0
vpxor %xmm2, %xmm2, %xmm2
cmpl $12, %edx
jne L_AES_GCM_decrypt_avx1_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
vpinsrd $0x00, (%esi), %xmm0, %xmm0
vpinsrd $0x01, 4(%esi), %xmm0, %xmm0
vpinsrd $2, 8(%esi), %xmm0, %xmm0
vpinsrd $3, %ecx, %xmm0, %xmm0
# H = Encrypt X(=0) and T = Encrypt counter
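# With a 12-byte IV, J0 is IV || 0x00000001 (the 0x1000000 constant is
# the big-endian 1 in the top dword), so H = E(K, 0) and E(K, J0) are
# computed in one interleaved pass through the round keys.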
vmovdqa (%ebp), %xmm1
vpxor %xmm1, %xmm0, %xmm5
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1_calc_iv_12_last
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1_calc_iv_12_last
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm1, %xmm1
vaesenc %xmm3, %xmm5, %xmm5
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_avx1_calc_iv_12_last:
vaesenclast %xmm3, %xmm1, %xmm1
vaesenclast %xmm3, %xmm5, %xmm5
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm1, %xmm1
vmovdqu %xmm5, 80(%esp)
jmp L_AES_GCM_decrypt_avx1_iv_done
L_AES_GCM_decrypt_avx1_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqa (%ebp), %xmm1
vaesenc 16(%ebp), %xmm1, %xmm1
vaesenc 32(%ebp), %xmm1, %xmm1
vaesenc 48(%ebp), %xmm1, %xmm1
vaesenc 64(%ebp), %xmm1, %xmm1
vaesenc 80(%ebp), %xmm1, %xmm1
vaesenc 96(%ebp), %xmm1, %xmm1
vaesenc 112(%ebp), %xmm1, %xmm1
vaesenc 128(%ebp), %xmm1, %xmm1
vaesenc 144(%ebp), %xmm1, %xmm1
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm5, %xmm1, %xmm1
vaesenc 176(%ebp), %xmm1, %xmm1
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm5, %xmm1, %xmm1
vaesenc 208(%ebp), %xmm1, %xmm1
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_avx1_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm5, %xmm1, %xmm1
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm1, %xmm1
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_decrypt_avx1_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx1_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx1_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_iv_16_loop
movl 224(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx1_calc_iv_done
L_AES_GCM_decrypt_avx1_calc_iv_lt16:
subl $16, %esp
vpxor %xmm4, %xmm4, %xmm4
xorl %ebx, %ebx
vmovdqu %xmm4, (%esp)
L_AES_GCM_decrypt_avx1_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_iv_loop
vmovdqu (%esp), %xmm4
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
L_AES_GCM_decrypt_avx1_calc_iv_done:
# T = Encrypt counter
vpxor %xmm4, %xmm4, %xmm4
shll $3, %edx
vpinsrd $0x00, %edx, %xmm4, %xmm4
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_avx
vpshufd $0x4e, %xmm0, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm0
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm0, %xmm0
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm0, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm0, %xmm0
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm0, %xmm0
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm0, %xmm0
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm0, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
# Encrypt counter
vmovdqa (%ebp), %xmm4
vpxor %xmm0, %xmm4, %xmm4
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vaesenc 96(%ebp), %xmm4, %xmm4
vaesenc 112(%ebp), %xmm4, %xmm4
vaesenc 128(%ebp), %xmm4, %xmm4
vaesenc 144(%ebp), %xmm4, %xmm4
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_avx1_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm5, %xmm4, %xmm4
vmovdqu %xmm4, 80(%esp)
L_AES_GCM_decrypt_avx1_iv_done:
movl 204(%esp), %esi
# Additional authentication data
movl 220(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_avx1_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx1_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx1_calc_aad_16_loop:
vmovdqu (%esi,%ecx,1), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_avx
vpshufd $0x4e, %xmm2, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm4
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm2, %xmm2
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm2, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm2, %xmm2
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm2, %xmm2
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm2, %xmm2
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm2, %xmm2
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_aad_16_loop
movl 220(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx1_calc_aad_done
L_AES_GCM_decrypt_avx1_calc_aad_lt16:
subl $16, %esp
vpxor %xmm4, %xmm4, %xmm4
xorl %ebx, %ebx
vmovdqu %xmm4, (%esp)
L_AES_GCM_decrypt_avx1_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx1_calc_aad_loop
vmovdqu (%esp), %xmm4
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_avx
vpshufd $0x4e, %xmm2, %xmm5
vpshufd $0x4e, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm4
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqa %xmm4, %xmm3
vmovdqa %xmm7, %xmm2
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm5, %xmm2, %xmm2
vpsrld $31, %xmm3, %xmm4
vpsrld $31, %xmm2, %xmm5
vpslld $0x01, %xmm3, %xmm3
vpslld $0x01, %xmm2, %xmm2
vpsrldq $12, %xmm4, %xmm6
vpslldq $4, %xmm4, %xmm4
vpslldq $4, %xmm5, %xmm5
vpor %xmm6, %xmm2, %xmm2
vpor %xmm4, %xmm3, %xmm3
vpor %xmm5, %xmm2, %xmm2
vpslld $31, %xmm3, %xmm4
vpslld $30, %xmm3, %xmm5
vpslld $25, %xmm3, %xmm6
vpxor %xmm5, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vmovdqa %xmm4, %xmm5
vpsrldq $4, %xmm5, %xmm5
vpslldq $12, %xmm4, %xmm4
vpxor %xmm4, %xmm3, %xmm3
vpsrld $0x01, %xmm3, %xmm6
vpsrld $2, %xmm3, %xmm7
vpsrld $7, %xmm3, %xmm4
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm6, %xmm2, %xmm2
L_AES_GCM_decrypt_avx1_calc_aad_done:
vmovdqu %xmm2, 96(%esp)
movl 196(%esp), %esi
movl 200(%esp), %edi
# Calculate counter and H
vpsrlq $63, %xmm1, %xmm5
vpsllq $0x01, %xmm1, %xmm4
vpslldq $8, %xmm5, %xmm5
vpor %xmm5, %xmm4, %xmm4
vpshufd $0xff, %xmm1, %xmm1
vpsrad $31, %xmm1, %xmm1
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm0, %xmm0
vpand L_aes_gcm_avx1_mod2_128, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, 64(%esp)
xorl %ebx, %ebx
cmpl $0x40, 216(%esp)
movl 216(%esp), %eax
jl L_AES_GCM_decrypt_avx1_done_64
andl $0xffffffc0, %eax
vmovdqa %xmm2, %xmm6
# H ^ 1
vmovdqu %xmm1, (%esp)
# H ^ 2
vpclmulqdq $0x00, %xmm1, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm1, %xmm1, %xmm0
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm0, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm0, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm3
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm3, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm3
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm3, 48(%esp)
cmpl %esi, %edi
jne L_AES_GCM_decrypt_avx1_ghash_64
L_AES_GCM_decrypt_avx1_ghash_64_inplace:
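# In-place variant (out == in): GHASH needs the original ciphertext,
# which the stores to (%edx) below overwrite, so the four input blocks
# are stashed at 112..160(%esp) and GHASHed from there.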
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm4
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm3
vpaddd L_aes_gcm_avx1_one, %xmm4, %xmm5
vpshufb %xmm3, %xmm5, %xmm5
vpaddd L_aes_gcm_avx1_two, %xmm4, %xmm6
vpshufb %xmm3, %xmm6, %xmm6
vpaddd L_aes_gcm_avx1_three, %xmm4, %xmm7
vpshufb %xmm3, %xmm7, %xmm7
vpshufb %xmm3, %xmm4, %xmm4
vmovdqu 64(%esp), %xmm3
vpaddd L_aes_gcm_avx1_four, %xmm3, %xmm3
vmovdqu %xmm3, 64(%esp)
vmovdqa (%ebp), %xmm3
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1inplace_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1inplace_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_avx1inplace_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm3, %xmm4, %xmm4
vaesenclast %xmm3, %xmm5, %xmm5
vmovdqu (%ecx), %xmm0
vmovdqu 16(%ecx), %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vmovdqu %xmm0, 112(%esp)
vmovdqu %xmm1, 128(%esp)
vmovdqu %xmm4, (%edx)
vmovdqu %xmm5, 16(%edx)
vaesenclast %xmm3, %xmm6, %xmm6
vaesenclast %xmm3, %xmm7, %xmm7
vmovdqu 32(%ecx), %xmm0
vmovdqu 48(%ecx), %xmm1
vpxor %xmm0, %xmm6, %xmm6
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm0, 144(%esp)
vmovdqu %xmm1, 160(%esp)
vmovdqu %xmm6, 32(%edx)
vmovdqu %xmm7, 48(%edx)
# ghash encrypted counter
vmovdqu 96(%esp), %xmm6
vmovdqu 48(%esp), %xmm3
vmovdqu 112(%esp), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vpshufd $0x4e, %xmm3, %xmm5
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm7
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm6
vpclmulqdq $0x00, %xmm1, %xmm5, %xmm5
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqu 32(%esp), %xmm3
vmovdqu 128(%esp), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu 16(%esp), %xmm3
vmovdqu 144(%esp), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu (%esp), %xmm3
vmovdqu 160(%esp), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpslld $31, %xmm6, %xmm3
vpslld $30, %xmm6, %xmm0
vpslld $25, %xmm6, %xmm1
vpxor %xmm0, %xmm3, %xmm3
vpxor %xmm1, %xmm3, %xmm3
vpsrldq $4, %xmm3, %xmm0
vpslldq $12, %xmm3, %xmm3
vpxor %xmm3, %xmm6, %xmm6
vpsrld $0x01, %xmm6, %xmm1
vpsrld $2, %xmm6, %xmm5
vpsrld $7, %xmm6, %xmm4
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vmovdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx1_ghash_64_inplace
jmp L_AES_GCM_decrypt_avx1_ghash_64_done
L_AES_GCM_decrypt_avx1_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm4
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm3
vpaddd L_aes_gcm_avx1_one, %xmm4, %xmm5
vpshufb %xmm3, %xmm5, %xmm5
vpaddd L_aes_gcm_avx1_two, %xmm4, %xmm6
vpshufb %xmm3, %xmm6, %xmm6
vpaddd L_aes_gcm_avx1_three, %xmm4, %xmm7
vpshufb %xmm3, %xmm7, %xmm7
vpshufb %xmm3, %xmm4, %xmm4
vmovdqu 64(%esp), %xmm3
vpaddd L_aes_gcm_avx1_four, %xmm3, %xmm3
vmovdqu %xmm3, 64(%esp)
vmovdqa (%ebp), %xmm3
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqa 16(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 32(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 48(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 64(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 80(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 96(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 112(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 128(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 144(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 176(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm3
jl L_AES_GCM_decrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 208(%ebp), %xmm3
vaesenc %xmm3, %xmm4, %xmm4
vaesenc %xmm3, %xmm5, %xmm5
vaesenc %xmm3, %xmm6, %xmm6
vaesenc %xmm3, %xmm7, %xmm7
vmovdqa 224(%ebp), %xmm3
L_AES_GCM_decrypt_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm3, %xmm4, %xmm4
vaesenclast %xmm3, %xmm5, %xmm5
vmovdqu (%ecx), %xmm0
vmovdqu 16(%ecx), %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vmovdqu %xmm4, (%edx)
vmovdqu %xmm5, 16(%edx)
vaesenclast %xmm3, %xmm6, %xmm6
vaesenclast %xmm3, %xmm7, %xmm7
vmovdqu 32(%ecx), %xmm0
vmovdqu 48(%ecx), %xmm1
vpxor %xmm0, %xmm6, %xmm6
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm6, 32(%edx)
vmovdqu %xmm7, 48(%edx)
# ghash encrypted counter
vmovdqu 96(%esp), %xmm6
vmovdqu 48(%esp), %xmm3
vmovdqu (%ecx), %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm4
vpshufd $0x4e, %xmm3, %xmm5
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm7
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm6
vpclmulqdq $0x00, %xmm1, %xmm5, %xmm5
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vmovdqu 32(%esp), %xmm3
vmovdqu 16(%ecx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu 16(%esp), %xmm3
vmovdqu 32(%ecx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vmovdqu (%esp), %xmm3
vmovdqu 48(%ecx), %xmm4
vpshufd $0x4e, %xmm3, %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm3, %xmm4, %xmm2
vpclmulqdq $0x00, %xmm3, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm1, %xmm0, %xmm0
vpxor %xmm3, %xmm5, %xmm5
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm0, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpslld $31, %xmm6, %xmm3
vpslld $30, %xmm6, %xmm0
vpslld $25, %xmm6, %xmm1
vpxor %xmm0, %xmm3, %xmm3
vpxor %xmm1, %xmm3, %xmm3
vpsrldq $4, %xmm3, %xmm0
vpslldq $12, %xmm3, %xmm3
vpxor %xmm3, %xmm6, %xmm6
vpsrld $0x01, %xmm6, %xmm1
vpsrld $2, %xmm6, %xmm5
vpsrld $7, %xmm6, %xmm4
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vmovdqu %xmm6, 96(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx1_ghash_64
L_AES_GCM_decrypt_avx1_ghash_64_done:
vmovdqa %xmm6, %xmm2
vmovdqu (%esp), %xmm1
L_AES_GCM_decrypt_avx1_done_64:
movl 216(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_avx1_done_dec
movl 216(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_avx1_last_block_done
L_AES_GCM_decrypt_avx1_last_block_start:
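# Decrypt tail: each ciphertext block is folded into the GHASH state
# before it is decrypted, then the multiply by H is interleaved with
# the CTR encryption exactly as in the encrypt path.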
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu (%ecx), %xmm7
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm7, %xmm7
vpxor %xmm2, %xmm7, %xmm7
vmovdqu 64(%esp), %xmm5
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm5, %xmm4
vpaddd L_aes_gcm_avx1_one, %xmm5, %xmm5
vmovdqu %xmm5, 64(%esp)
vpxor (%ebp), %xmm4, %xmm4
vpclmulqdq $16, %xmm1, %xmm7, %xmm0
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vpclmulqdq $0x01, %xmm1, %xmm7, %xmm3
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vpclmulqdq $0x11, %xmm1, %xmm7, %xmm5
vaesenc 96(%ebp), %xmm4, %xmm4
vpxor %xmm3, %xmm0, %xmm0
vpslldq $8, %xmm0, %xmm6
vpsrldq $8, %xmm0, %xmm0
vaesenc 112(%ebp), %xmm4, %xmm4
vpclmulqdq $0x00, %xmm1, %xmm7, %xmm3
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm0, %xmm5, %xmm5
vmovdqa L_aes_gcm_avx1_mod2_128, %xmm7
vpclmulqdq $16, %xmm7, %xmm6, %xmm3
vaesenc 128(%ebp), %xmm4, %xmm4
vpshufd $0x4e, %xmm6, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpclmulqdq $16, %xmm7, %xmm0, %xmm3
vaesenc 144(%ebp), %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm2
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm5, %xmm2, %xmm2
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_aesenc_gfmul_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_aesenc_gfmul_last
vaesenc %xmm5, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_avx1_aesenc_gfmul_last:
vaesenclast %xmm5, %xmm4, %xmm4
vmovdqu (%ecx), %xmm5
vpxor %xmm5, %xmm4, %xmm4
vmovdqu %xmm4, (%edx)
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx1_last_block_start
L_AES_GCM_decrypt_avx1_last_block_done:
movl 216(%esp), %ecx
movl %ecx, %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_done
vmovdqu 64(%esp), %xmm0
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm0, %xmm0
vpxor (%ebp), %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 236(%esp)
vmovdqa 160(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm5, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 236(%esp)
vmovdqa 192(%ebp), %xmm5
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm5, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm5
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_aesenc_avx_last:
vaesenclast %xmm5, %xmm0, %xmm0
subl $32, %esp
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
vpxor %xmm4, %xmm4, %xmm4
vmovdqu %xmm4, 16(%esp)
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_loop:
movzbl (%esi,%ebx,1), %eax
movb %al, 16(%esp,%ecx,1)
xorb (%esp,%ecx,1), %al
movb %al, (%edi,%ebx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_loop
vmovdqu 16(%esp), %xmm0
addl $32, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm2, %xmm2
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm2, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm2
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
L_AES_GCM_decrypt_avx1_aesenc_last15_dec_avx_done:
L_AES_GCM_decrypt_avx1_done_dec:
movl 212(%esp), %esi
movl 228(%esp), %ebp
movl 216(%esp), %edx
movl 220(%esp), %ecx
shll $3, %edx
shll $3, %ecx
vpinsrd $0x00, %edx, %xmm4, %xmm4
vpinsrd $2, %ecx, %xmm4, %xmm4
movl 216(%esp), %edx
movl 220(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
vpinsrd $0x01, %edx, %xmm4, %xmm4
vpinsrd $3, %ecx, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm1, %xmm5
vpshufd $0x4e, %xmm2, %xmm6
vpclmulqdq $0x11, %xmm1, %xmm2, %xmm7
vpclmulqdq $0x00, %xmm1, %xmm2, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm6
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm6, %xmm4, %xmm4
vpxor %xmm5, %xmm7, %xmm2
vpslld $31, %xmm4, %xmm5
vpslld $30, %xmm4, %xmm6
vpslld $25, %xmm4, %xmm7
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm7, %xmm5, %xmm5
vpsrldq $4, %xmm5, %xmm7
vpslldq $12, %xmm5, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm5
vpsrld $2, %xmm4, %xmm6
vpxor %xmm6, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpsrld $7, %xmm4, %xmm4
vpxor %xmm7, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm2, %xmm2
vpxor 80(%esp), %xmm2, %xmm4
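# Tag check: the computed tag (GHASH result XORed with the encrypted
# initial counter from 80(%esp)) is compared with the caller's tag in
# constant time: byte differences are OR-accumulated in %bl for short
# tags, and the vpcmpeqb/vpmovmskb mask must equal 0xffff for a full
# 16-byte tag. The 0/1 result is written through the pointer in %edi.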
movl 240(%esp), %edi
cmpl $16, %ebp
je L_AES_GCM_decrypt_avx1_cmp_tag_16
subl $16, %esp
xorl %ecx, %ecx
xorl %ebx, %ebx
vmovdqu %xmm4, (%esp)
L_AES_GCM_decrypt_avx1_cmp_tag_loop:
movzbl (%esp,%ecx,1), %eax
xorb (%esi,%ecx,1), %al
orb %al, %bl
incl %ecx
cmpl %ebp, %ecx
jne L_AES_GCM_decrypt_avx1_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addl $16, %esp
xorl %ecx, %ecx
jmp L_AES_GCM_decrypt_avx1_cmp_tag_done
L_AES_GCM_decrypt_avx1_cmp_tag_16:
vmovdqu (%esi), %xmm5
vpcmpeqb %xmm5, %xmm4, %xmm4
vpmovmskb %xmm4, %edx
# edx == 0xffff => tags equal (return 1), else return 0
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_avx1_cmp_tag_done:
movl %ebx, (%edi)
addl $0xb0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_avx1,.-AES_GCM_decrypt_avx1
#ifdef WOLFSSL_AESGCM_STREAM
.text
.globl AES_GCM_init_avx1
.type AES_GCM_init_avx1,@function
.align 16
AES_GCM_init_avx1:
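# Streaming-mode init (WOLFSSL_AESGCM_STREAM): computes the hash key
# H = E(K, 0), derives the initial counter from the IV (12-byte fast
# path below, full GHASH otherwise) and, it appears, writes the
# encrypted pre-counter block through the pointer loaded from 60(%esp);
# bulk data is handled by the separate update routines.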
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 36(%esp), %ebp
movl 44(%esp), %esi
movl 60(%esp), %edi
vpxor %xmm4, %xmm4, %xmm4
movl 48(%esp), %edx
cmpl $12, %edx
jne L_AES_GCM_init_avx1_iv_not_12
# Calculate values when IV is 12 bytes
# Set counter based on IV
movl $0x1000000, %ecx
vpinsrd $0x00, (%esi), %xmm4, %xmm4
vpinsrd $0x01, 4(%esi), %xmm4, %xmm4
vpinsrd $2, 8(%esi), %xmm4, %xmm4
vpinsrd $3, %ecx, %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqa (%ebp), %xmm5
vpxor %xmm5, %xmm4, %xmm1
vmovdqa 16(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 32(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 48(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 64(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 80(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 96(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 112(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 128(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 144(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $11, 40(%esp)
vmovdqa 160(%ebp), %xmm7
jl L_AES_GCM_init_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 176(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
cmpl $13, 40(%esp)
vmovdqa 192(%ebp), %xmm7
jl L_AES_GCM_init_avx1_calc_iv_12_last
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 208(%ebp), %xmm7
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm1, %xmm1
vmovdqa 224(%ebp), %xmm7
L_AES_GCM_init_avx1_calc_iv_12_last:
vaesenclast %xmm7, %xmm5, %xmm5
vaesenclast %xmm7, %xmm1, %xmm1
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm5, %xmm5
vmovdqu %xmm1, (%edi)
jmp L_AES_GCM_init_avx1_iv_done
L_AES_GCM_init_avx1_iv_not_12:
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqa (%ebp), %xmm5
vaesenc 16(%ebp), %xmm5, %xmm5
vaesenc 32(%ebp), %xmm5, %xmm5
vaesenc 48(%ebp), %xmm5, %xmm5
vaesenc 64(%ebp), %xmm5, %xmm5
vaesenc 80(%ebp), %xmm5, %xmm5
vaesenc 96(%ebp), %xmm5, %xmm5
vaesenc 112(%ebp), %xmm5, %xmm5
vaesenc 128(%ebp), %xmm5, %xmm5
vaesenc 144(%ebp), %xmm5, %xmm5
cmpl $11, 40(%esp)
vmovdqa 160(%ebp), %xmm1
jl L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm1, %xmm5, %xmm5
vaesenc 176(%ebp), %xmm5, %xmm5
cmpl $13, 40(%esp)
vmovdqa 192(%ebp), %xmm1
jl L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last
vaesenc %xmm1, %xmm5, %xmm5
vaesenc 208(%ebp), %xmm5, %xmm5
vmovdqa 224(%ebp), %xmm1
L_AES_GCM_init_avx1_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm1, %xmm5, %xmm5
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm5, %xmm5
# Calc counter
# Initialization vector
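# For IVs that are not 12 bytes, the initial counter is J0 = GHASH(IV):
# hash full 16-byte blocks, a zero-padded tail block, then the IV bit length.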
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_init_avx1_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_init_avx1_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_init_avx1_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx1_calc_iv_16_loop
movl 48(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_init_avx1_calc_iv_done
L_AES_GCM_init_avx1_calc_iv_lt16:
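# Copy the 1..15 trailing IV bytes into a zeroed stack block so the tail
# can be hashed as a full block.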
subl $16, %esp
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_init_avx1_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx1_calc_iv_loop
vmovdqu (%esp), %xmm0
addl $16, %esp
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
L_AES_GCM_init_avx1_calc_iv_done:
# Fold the IV bit length into GHASH to finish J0, then T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm7
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm7, %xmm0
vpslld $30, %xmm7, %xmm1
vpslld $25, %xmm7, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm7, %xmm7
vpsrld $0x01, %xmm7, %xmm2
vpsrld $2, %xmm7, %xmm3
vpsrld $7, %xmm7, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
# Encrypt counter
vmovdqa (%ebp), %xmm0
vpxor %xmm4, %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 40(%esp)
vmovdqa 160(%ebp), %xmm1
jl L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 40(%esp)
vmovdqa 192(%ebp), %xmm1
jl L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm1
L_AES_GCM_init_avx1_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edi)
L_AES_GCM_init_avx1_iv_done:
movl 52(%esp), %ebp
movl 56(%esp), %edi
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm4, %xmm4
vpaddd L_aes_gcm_avx1_one, %xmm4, %xmm4
vmovdqa %xmm5, (%ebp)
vmovdqa %xmm4, (%edi)
addl $16, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_init_avx1,.-AES_GCM_init_avx1
.text
.globl AES_GCM_aad_update_avx1
.type AES_GCM_aad_update_avx1,@function
.align 16
AES_GCM_aad_update_avx1:
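# GHASH whole 16-byte blocks of AAD; from the loads below the arguments
# are (aad, aadSz, X, H), with the hash state X updated in place.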
pushl %esi
pushl %edi
movl 12(%esp), %esi
movl 16(%esp), %edx
movl 20(%esp), %edi
movl 24(%esp), %eax
vmovdqa (%edi), %xmm5
vmovdqa (%eax), %xmm6
xorl %ecx, %ecx
L_AES_GCM_aad_update_avx1_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm5, %xmm5
# ghash_gfmul_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm6, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm6, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm4
vmovdqa %xmm3, %xmm5
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm5, %xmm5
vpsrld $31, %xmm4, %xmm0
vpsrld $31, %xmm5, %xmm1
vpslld $0x01, %xmm4, %xmm4
vpslld $0x01, %xmm5, %xmm5
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm5, %xmm5
vpor %xmm0, %xmm4, %xmm4
vpor %xmm1, %xmm5, %xmm5
vpslld $31, %xmm4, %xmm0
vpslld $30, %xmm4, %xmm1
vpslld $25, %xmm4, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm2
vpsrld $2, %xmm4, %xmm3
vpsrld $7, %xmm4, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm2, %xmm5, %xmm5
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_aad_update_avx1_16_loop
vmovdqa %xmm5, (%edi)
popl %edi
popl %esi
ret
.size AES_GCM_aad_update_avx1,.-AES_GCM_aad_update_avx1
.text
.globl AES_GCM_encrypt_block_avx1
.type AES_GCM_encrypt_block_avx1,@function
.align 16
AES_GCM_encrypt_block_avx1:
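# Encrypt a single block in CTR mode: byte-swap the saved counter for the
# AES input, post-increment the stored counter, then XOR with the input.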
pushl %esi
pushl %edi
movl 12(%esp), %ecx
movl 16(%esp), %eax
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
vmovdqu (%edx), %xmm1
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx1_one, %xmm1, %xmm1
vmovdqu %xmm1, (%edx)
vpxor (%ecx), %xmm0, %xmm0
vaesenc 16(%ecx), %xmm0, %xmm0
vaesenc 32(%ecx), %xmm0, %xmm0
vaesenc 48(%ecx), %xmm0, %xmm0
vaesenc 64(%ecx), %xmm0, %xmm0
vaesenc 80(%ecx), %xmm0, %xmm0
vaesenc 96(%ecx), %xmm0, %xmm0
vaesenc 112(%ecx), %xmm0, %xmm0
vaesenc 128(%ecx), %xmm0, %xmm0
vaesenc 144(%ecx), %xmm0, %xmm0
cmpl $11, %eax
vmovdqa 160(%ecx), %xmm1
jl L_AES_GCM_encrypt_block_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%ecx), %xmm0, %xmm0
cmpl $13, %eax
vmovdqa 192(%ecx), %xmm1
jl L_AES_GCM_encrypt_block_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%ecx), %xmm0, %xmm0
vmovdqa 224(%ecx), %xmm1
L_AES_GCM_encrypt_block_avx1_aesenc_block_aesenc_avx_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%esi), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edi)
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_block_avx1,.-AES_GCM_encrypt_block_avx1
.text
.globl AES_GCM_ghash_block_avx1
.type AES_GCM_ghash_block_avx1,@function
.align 16
AES_GCM_ghash_block_avx1:
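# One GHASH step: X = (X ^ bswap(block)) * H, with X written back in place.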
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
vmovdqa (%eax), %xmm4
vmovdqa (%ecx), %xmm5
vmovdqu (%edx), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpshufd $0x4e, %xmm4, %xmm1
vpshufd $0x4e, %xmm5, %xmm2
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm6
vmovdqa %xmm3, %xmm4
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm4, %xmm4
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
vpslld $31, %xmm6, %xmm0
vpslld $30, %xmm6, %xmm1
vpslld $25, %xmm6, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
vpsrld $0x01, %xmm6, %xmm2
vpsrld $2, %xmm6, %xmm3
vpsrld $7, %xmm6, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm6, %xmm2, %xmm2
vpxor %xmm2, %xmm4, %xmm4
vmovdqa %xmm4, (%eax)
ret
.size AES_GCM_ghash_block_avx1,.-AES_GCM_ghash_block_avx1
.text
.globl AES_GCM_encrypt_update_avx1
.type AES_GCM_encrypt_update_avx1,@function
.align 16
AES_GCM_encrypt_update_avx1:
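# Bulk path: precompute H^1..H^4 on the stack, process 64 bytes (four
# counter blocks) per iteration, then finish remaining 16-byte blocks with
# AES and GHASH interleaved.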
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x60, %esp
movl 144(%esp), %esi
vmovdqa (%esi), %xmm4
vmovdqu %xmm4, 64(%esp)
movl 136(%esp), %esi
movl 140(%esp), %ebp
vmovdqa (%esi), %xmm6
vmovdqa (%ebp), %xmm5
vmovdqu %xmm6, 80(%esp)
movl 116(%esp), %ebp
movl 124(%esp), %edi
movl 128(%esp), %esi
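# H = H*x in GF(2^128): 128-bit left shift by one; if the top bit was set,
# xor in the reduction polynomial (L_aes_gcm_avx1_mod2_128).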
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx1_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 132(%esp)
movl 132(%esp), %eax
jl L_AES_GCM_encrypt_update_avx1_done_64
andl $0xffffffc0, %eax
vmovdqa %xmm6, %xmm2
# H ^ 1
vmovdqu %xmm5, (%esp)
# H ^ 2
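# Squaring in GF(2^128) has no cross terms (characteristic 2), so only the
# lo*lo and hi*hi products are needed before reduction.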
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm4
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vmovdqu %xmm4, 16(%esp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm4, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm0, %xmm0
vpxor %xmm1, %xmm3, %xmm7
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm7, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm4, %xmm4, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm4, %xmm7
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm7, 48(%esp)
# First 64 bytes of input
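# Build four counter blocks (ctr+0..ctr+3), byte-swap them for encryption,
# and advance the saved counter by four.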
vmovdqu 64(%esp), %xmm0
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm1
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_two, %xmm0, %xmm2
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx1_three, %xmm0, %xmm3
vpshufb %xmm7, %xmm3, %xmm3
vpshufb %xmm7, %xmm0, %xmm0
vmovdqu 64(%esp), %xmm7
vpaddd L_aes_gcm_avx1_four, %xmm7, %xmm7
vmovdqu %xmm7, 64(%esp)
vmovdqa (%ebp), %xmm7
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqa 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 120(%esp)
vmovdqa 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 120(%esp)
vmovdqa 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_avx1_aesenc_64_enc_done:
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vmovdqu (%esi), %xmm4
vmovdqu 16(%esi), %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vmovdqu %xmm4, (%esi)
vmovdqu %xmm5, 16(%esi)
vmovdqu %xmm0, (%edi)
vmovdqu %xmm1, 16(%edi)
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu 32(%esi), %xmm4
vmovdqu 48(%esi), %xmm5
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm4, 32(%esi)
vmovdqu %xmm5, 48(%esi)
vmovdqu %xmm2, 32(%edi)
vmovdqu %xmm3, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
movl %esi, %ecx
movl %edi, %edx
jle L_AES_GCM_encrypt_update_avx1_end_64
# More 64 bytes of input
L_AES_GCM_encrypt_update_avx1_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm0
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm1
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_two, %xmm0, %xmm2
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx1_three, %xmm0, %xmm3
vpshufb %xmm7, %xmm3, %xmm3
vpshufb %xmm7, %xmm0, %xmm0
vmovdqu 64(%esp), %xmm7
vpaddd L_aes_gcm_avx1_four, %xmm7, %xmm7
vmovdqu %xmm7, 64(%esp)
vmovdqa (%ebp), %xmm7
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqa 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 120(%esp)
vmovdqa 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 120(%esp)
vmovdqa 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vmovdqu (%ecx), %xmm4
vmovdqu 16(%ecx), %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu 32(%ecx), %xmm4
vmovdqu 48(%ecx), %xmm5
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# ghash encrypted counter
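# GHASH the four ciphertext blocks just written (-64..-16(%edx)) against
# H^4..H^1, accumulating Karatsuba partial products and reducing once.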
vmovdqu 80(%esp), %xmm2
vmovdqu 48(%esp), %xmm7
vmovdqu -64(%edx), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqu 32(%esp), %xmm7
vmovdqu -48(%edx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 16(%esp), %xmm7
vmovdqu -32(%edx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu (%esp), %xmm7
vmovdqu -16(%edx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
vmovdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_avx1_ghash_64
L_AES_GCM_encrypt_update_avx1_end_64:
vmovdqu 80(%esp), %xmm6
# Block 1
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm0
vmovdqu (%edx), %xmm5
vpshufb %xmm0, %xmm5, %xmm5
vmovdqu 48(%esp), %xmm7
vpxor %xmm6, %xmm5, %xmm5
# ghash_gfmul_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm7, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqa %xmm0, %xmm4
vmovdqa %xmm3, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# Block 2
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm0
vmovdqu 16(%edx), %xmm5
vpshufb %xmm0, %xmm5, %xmm5
vmovdqu 32(%esp), %xmm7
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm7, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# Block 3
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm0
vmovdqu 32(%edx), %xmm5
vpshufb %xmm0, %xmm5, %xmm5
vmovdqu 16(%esp), %xmm7
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm7, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
# Block 4
vmovdqa L_aes_gcm_avx1_bswap_mask, %xmm0
vmovdqu 48(%edx), %xmm5
vpshufb %xmm0, %xmm5, %xmm5
vmovdqu (%esp), %xmm7
# ghash_gfmul_xor_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm7, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm7, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm7, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm4, %xmm4
vpxor %xmm3, %xmm6, %xmm6
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm4, %xmm4
vpxor %xmm1, %xmm6, %xmm6
vpslld $31, %xmm4, %xmm0
vpslld $30, %xmm4, %xmm1
vpslld $25, %xmm4, %xmm2
vpxor %xmm1, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, %xmm1
vpsrldq $4, %xmm1, %xmm1
vpslldq $12, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
vpsrld $0x01, %xmm4, %xmm2
vpsrld $2, %xmm4, %xmm3
vpsrld $7, %xmm4, %xmm0
vpxor %xmm3, %xmm2, %xmm2
vpxor %xmm0, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm2, %xmm6, %xmm6
vmovdqu (%esp), %xmm5
L_AES_GCM_encrypt_update_avx1_done_64:
movl 132(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_encrypt_update_avx1_done_enc
movl 132(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_avx1_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm1
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx1_one, %xmm1, %xmm1
vmovdqu %xmm1, 64(%esp)
vpxor (%ebp), %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 120(%esp)
vmovdqa 160(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 120(%esp)
vmovdqa 192(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_avx1_aesenc_block_aesenc_avx_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm1
L_AES_GCM_encrypt_update_avx1_aesenc_block_aesenc_avx_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%ecx), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edx)
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_avx1_last_block_ghash
L_AES_GCM_encrypt_update_avx1_last_block_start:
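# Tail loop: one CTR block per iteration, with the GHASH multiply of the
# previous ciphertext block interleaved between the AES rounds.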
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm1
vmovdqu %xmm6, %xmm3
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx1_one, %xmm1, %xmm1
vmovdqu %xmm1, 64(%esp)
vpxor (%ebp), %xmm0, %xmm0
vpclmulqdq $16, %xmm5, %xmm3, %xmm4
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vpclmulqdq $0x01, %xmm5, %xmm3, %xmm7
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vpclmulqdq $0x11, %xmm5, %xmm3, %xmm1
vaesenc 96(%ebp), %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm2
vpsrldq $8, %xmm4, %xmm4
vaesenc 112(%ebp), %xmm0, %xmm0
vpclmulqdq $0x00, %xmm5, %xmm3, %xmm7
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm1, %xmm1
vmovdqa L_aes_gcm_avx1_mod2_128, %xmm3
vpclmulqdq $16, %xmm3, %xmm2, %xmm7
vaesenc 128(%ebp), %xmm0, %xmm0
vpshufd $0x4e, %xmm2, %xmm4
vpxor %xmm7, %xmm4, %xmm4
vpclmulqdq $16, %xmm3, %xmm4, %xmm7
vaesenc 144(%ebp), %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
cmpl $11, 120(%esp)
vmovdqa 160(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 120(%esp)
vmovdqa 192(%ebp), %xmm1
jl L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm1
L_AES_GCM_encrypt_update_avx1_aesenc_gfmul_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%ecx), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edx)
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
addl $16, %ebx
vpxor %xmm0, %xmm6, %xmm6
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_avx1_last_block_start
L_AES_GCM_encrypt_update_avx1_last_block_ghash:
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm6, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm6, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm0, %xmm0
vpxor %xmm1, %xmm3, %xmm6
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
L_AES_GCM_encrypt_update_avx1_last_block_done:
L_AES_GCM_encrypt_update_avx1_done_enc:
movl 136(%esp), %esi
movl 144(%esp), %edi
vmovdqu 64(%esp), %xmm4
vmovdqa %xmm6, (%esi)
vmovdqu %xmm4, (%edi)
addl $0x60, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_update_avx1,.-AES_GCM_encrypt_update_avx1
.text
.globl AES_GCM_encrypt_final_avx1
.type AES_GCM_encrypt_final_avx1,@function
.align 16
AES_GCM_encrypt_final_avx1:
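# Produce the tag: fold the AAD and message bit lengths into GHASH,
# byte-swap, XOR with the encrypted initial counter block, then store
# tagSz bytes (partial tags via a byte loop, 16-byte tags directly).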
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 32(%esp), %ebp
movl 52(%esp), %esi
movl 56(%esp), %edi
vmovdqa (%ebp), %xmm4
vmovdqa (%esi), %xmm5
vmovdqa (%edi), %xmm6
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx1_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
movl 44(%esp), %edx
movl 48(%esp), %ecx
shll $3, %edx
shll $3, %ecx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 44(%esp), %edx
movl 48(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
vpinsrd $0x01, %edx, %xmm0, %xmm0
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm4, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm0, %xmm0
vpxor %xmm1, %xmm3, %xmm4
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm4, %xmm4
vpxor %xmm6, %xmm4, %xmm0
movl 36(%esp), %edi
cmpl $16, 40(%esp)
je L_AES_GCM_encrypt_final_avx1_store_tag_16
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_final_avx1_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl 40(%esp), %ecx
jne L_AES_GCM_encrypt_final_avx1_store_tag_loop
jmp L_AES_GCM_encrypt_final_avx1_store_tag_done
L_AES_GCM_encrypt_final_avx1_store_tag_16:
vmovdqu %xmm0, (%edi)
L_AES_GCM_encrypt_final_avx1_store_tag_done:
addl $16, %esp
popl %ebp
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_final_avx1,.-AES_GCM_encrypt_final_avx1
.text
.globl AES_GCM_decrypt_update_avx1
.type AES_GCM_decrypt_update_avx1,@function
.align 16
AES_GCM_decrypt_update_avx1:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xa0, %esp
movl 208(%esp), %esi
vmovdqa (%esi), %xmm4
vmovdqu %xmm4, 64(%esp)
movl 200(%esp), %esi
movl 204(%esp), %ebp
vmovdqa (%esi), %xmm6
vmovdqa (%ebp), %xmm5
vmovdqu %xmm6, 80(%esp)
movl 180(%esp), %ebp
movl 188(%esp), %edi
movl 192(%esp), %esi
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx1_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 196(%esp)
movl 196(%esp), %eax
jl L_AES_GCM_decrypt_update_avx1_done_64
andl $0xffffffc0, %eax
vmovdqa %xmm6, %xmm2
# H ^ 1
vmovdqu %xmm5, (%esp)
# H ^ 2
vpclmulqdq $0x00, %xmm5, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm5, %xmm5, %xmm4
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vmovdqu %xmm4, 16(%esp)
# H ^ 3
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm4, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm0, %xmm0
vpxor %xmm1, %xmm3, %xmm7
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm7, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm4, %xmm4, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm4, %xmm7
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
vmovdqu %xmm7, 48(%esp)
cmpl %esi, %edi
jne L_AES_GCM_decrypt_update_avx1_ghash_64
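# in == out: the ciphertext is overwritten by plaintext, so stash each
# 64-byte chunk on the stack and GHASH it from there.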
L_AES_GCM_decrypt_update_avx1_ghash_64_inplace:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm0
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm1
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_two, %xmm0, %xmm2
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx1_three, %xmm0, %xmm3
vpshufb %xmm7, %xmm3, %xmm3
vpshufb %xmm7, %xmm0, %xmm0
vmovdqu 64(%esp), %xmm7
vpaddd L_aes_gcm_avx1_four, %xmm7, %xmm7
vmovdqu %xmm7, 64(%esp)
vmovdqa (%ebp), %xmm7
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqa 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 184(%esp)
vmovdqa 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx1_inplace_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 184(%esp)
vmovdqa 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx1_inplace_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_avx1_inplace_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vmovdqu (%ecx), %xmm4
vmovdqu 16(%ecx), %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vmovdqu %xmm4, 96(%esp)
vmovdqu %xmm5, 112(%esp)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu 32(%ecx), %xmm4
vmovdqu 48(%ecx), %xmm5
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm4, 128(%esp)
vmovdqu %xmm5, 144(%esp)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# ghash encrypted counter
vmovdqu 80(%esp), %xmm2
vmovdqu 48(%esp), %xmm7
vmovdqu 96(%esp), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqu 32(%esp), %xmm7
vmovdqu 112(%esp), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 16(%esp), %xmm7
vmovdqu 128(%esp), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu (%esp), %xmm7
vmovdqu 144(%esp), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
vmovdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx1_ghash_64_inplace
jmp L_AES_GCM_decrypt_update_avx1_ghash_64_done
L_AES_GCM_decrypt_update_avx1_ghash_64:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu 64(%esp), %xmm0
vmovdqa L_aes_gcm_avx1_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx1_one, %xmm0, %xmm1
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx1_two, %xmm0, %xmm2
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx1_three, %xmm0, %xmm3
vpshufb %xmm7, %xmm3, %xmm3
vpshufb %xmm7, %xmm0, %xmm0
vmovdqu 64(%esp), %xmm7
vpaddd L_aes_gcm_avx1_four, %xmm7, %xmm7
vmovdqu %xmm7, 64(%esp)
vmovdqa (%ebp), %xmm7
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqa 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 184(%esp)
vmovdqa 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 184(%esp)
vmovdqa 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqa 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_avx1_aesenc_64_ghash_avx_aesenc_64_enc_done:
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vmovdqu (%ecx), %xmm4
vmovdqu 16(%ecx), %xmm5
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vmovdqu %xmm4, (%ecx)
vmovdqu %xmm5, 16(%ecx)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu 32(%ecx), %xmm4
vmovdqu 48(%ecx), %xmm5
vpxor %xmm4, %xmm2, %xmm2
vpxor %xmm5, %xmm3, %xmm3
vmovdqu %xmm4, 32(%ecx)
vmovdqu %xmm5, 48(%ecx)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# ghash encrypted counter
vmovdqu 80(%esp), %xmm2
vmovdqu 48(%esp), %xmm7
vmovdqu (%ecx), %xmm0
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm2
vpclmulqdq $0x00, %xmm5, %xmm1, %xmm1
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vmovdqu 32(%esp), %xmm7
vmovdqu 16(%ecx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu 16(%esp), %xmm7
vmovdqu 32(%ecx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vmovdqu (%esp), %xmm7
vmovdqu 48(%ecx), %xmm0
vpshufd $0x4e, %xmm7, %xmm4
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpshufd $0x4e, %xmm0, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm7, %xmm0, %xmm6
vpclmulqdq $0x00, %xmm7, %xmm0, %xmm7
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm4
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm6, %xmm3, %xmm3
vpxor %xmm4, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm5
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm1, %xmm3, %xmm3
vpslld $31, %xmm2, %xmm7
vpslld $30, %xmm2, %xmm4
vpslld $25, %xmm2, %xmm5
vpxor %xmm4, %xmm7, %xmm7
vpxor %xmm5, %xmm7, %xmm7
vpsrldq $4, %xmm7, %xmm4
vpslldq $12, %xmm7, %xmm7
vpxor %xmm7, %xmm2, %xmm2
vpsrld $0x01, %xmm2, %xmm5
vpsrld $2, %xmm2, %xmm1
vpsrld $7, %xmm2, %xmm0
vpxor %xmm1, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm2, %xmm2
vpxor %xmm3, %xmm2, %xmm2
vmovdqu %xmm2, 80(%esp)
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx1_ghash_64
L_AES_GCM_decrypt_update_avx1_ghash_64_done:
vmovdqa %xmm2, %xmm6
vmovdqu (%esp), %xmm5
L_AES_GCM_decrypt_update_avx1_done_64:
movl 196(%esp), %edx
cmpl %edx, %ebx
jge L_AES_GCM_decrypt_update_avx1_done_dec
movl 196(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_update_avx1_last_block_done
L_AES_GCM_decrypt_update_avx1_last_block_start:
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
vmovdqu (%ecx), %xmm1
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm1, %xmm1
vpxor %xmm6, %xmm1, %xmm1
vmovdqu %xmm1, (%esp)
vmovdqu 64(%esp), %xmm1
vmovdqu (%esp), %xmm3
vpshufb L_aes_gcm_avx1_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx1_one, %xmm1, %xmm1
vmovdqu %xmm1, 64(%esp)
vpxor (%ebp), %xmm0, %xmm0
vpclmulqdq $16, %xmm5, %xmm3, %xmm4
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vpclmulqdq $0x01, %xmm5, %xmm3, %xmm7
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vpclmulqdq $0x11, %xmm5, %xmm3, %xmm1
vaesenc 96(%ebp), %xmm0, %xmm0
vpxor %xmm7, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm2
vpsrldq $8, %xmm4, %xmm4
vaesenc 112(%ebp), %xmm0, %xmm0
vpclmulqdq $0x00, %xmm5, %xmm3, %xmm7
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm1, %xmm1
vmovdqa L_aes_gcm_avx1_mod2_128, %xmm3
vpclmulqdq $16, %xmm3, %xmm2, %xmm7
vaesenc 128(%ebp), %xmm0, %xmm0
vpshufd $0x4e, %xmm2, %xmm4
vpxor %xmm7, %xmm4, %xmm4
vpclmulqdq $16, %xmm3, %xmm4, %xmm7
vaesenc 144(%ebp), %xmm0, %xmm0
vpshufd $0x4e, %xmm4, %xmm6
vpxor %xmm7, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
cmpl $11, 184(%esp)
vmovdqa 160(%ebp), %xmm1
jl L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 184(%esp)
vmovdqa 192(%ebp), %xmm1
jl L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last
vaesenc %xmm1, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqa 224(%ebp), %xmm1
L_AES_GCM_decrypt_update_avx1_aesenc_gfmul_last:
vaesenclast %xmm1, %xmm0, %xmm0
vmovdqu (%ecx), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edx)
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx1_last_block_start
L_AES_GCM_decrypt_update_avx1_last_block_done:
L_AES_GCM_decrypt_update_avx1_done_dec:
movl 200(%esp), %esi
movl 208(%esp), %edi
vmovdqu 64(%esp), %xmm4
vmovdqa %xmm6, (%esi)
vmovdqu %xmm4, (%edi)
addl $0xa0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_update_avx1,.-AES_GCM_decrypt_update_avx1
.text
.globl AES_GCM_decrypt_final_avx1
.type AES_GCM_decrypt_final_avx1,@function
.align 16
AES_GCM_decrypt_final_avx1:
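# Verify the tag: finish GHASH with the lengths block, XOR with the
# encrypted initial counter, and compare with the supplied tag in
# constant time (16-byte tags via vpcmpeqb/vpmovmskb).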
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 36(%esp), %ebp
movl 56(%esp), %esi
movl 60(%esp), %edi
vmovdqa (%ebp), %xmm6
vmovdqa (%esi), %xmm5
vmovdqa (%edi), %xmm7
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx1_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
movl 48(%esp), %edx
movl 52(%esp), %ecx
shll $3, %edx
shll $3, %ecx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 48(%esp), %edx
movl 52(%esp), %ecx
shrl $29, %edx
shrl $29, %ecx
vpinsrd $0x01, %edx, %xmm0, %xmm0
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_red_avx
vpshufd $0x4e, %xmm5, %xmm1
vpshufd $0x4e, %xmm6, %xmm2
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm6, %xmm2, %xmm2
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpslldq $8, %xmm1, %xmm2
vpsrldq $8, %xmm1, %xmm1
vpxor %xmm2, %xmm0, %xmm0
vpxor %xmm1, %xmm3, %xmm6
vpslld $31, %xmm0, %xmm1
vpslld $30, %xmm0, %xmm2
vpslld $25, %xmm0, %xmm3
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm3, %xmm1, %xmm1
vpsrldq $4, %xmm1, %xmm3
vpslldq $12, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpsrld $0x01, %xmm0, %xmm1
vpsrld $2, %xmm0, %xmm2
vpxor %xmm2, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpsrld $7, %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vpshufb L_aes_gcm_avx1_bswap_mask, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm0
movl 40(%esp), %esi
movl 64(%esp), %edi
cmpl $16, 44(%esp)
je L_AES_GCM_decrypt_final_avx1_cmp_tag_16
subl $16, %esp
xorl %ecx, %ecx
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_decrypt_final_avx1_cmp_tag_loop:
movzbl (%esp,%ecx,1), %eax
xorb (%esi,%ecx,1), %al
orb %al, %bl
incl %ecx
cmpl 44(%esp), %ecx
jne L_AES_GCM_decrypt_final_avx1_cmp_tag_loop
cmpb $0x00, %bl
sete %bl
addl $16, %esp
xorl %ecx, %ecx
jmp L_AES_GCM_decrypt_final_avx1_cmp_tag_done
L_AES_GCM_decrypt_final_avx1_cmp_tag_16:
vmovdqu (%esi), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %edx
# if %edx == 0xFFFF then return 1, else return 0
xorl %ebx, %ebx
cmpl $0xffff, %edx
sete %bl
L_AES_GCM_decrypt_final_avx1_cmp_tag_done:
movl %ebx, (%edi)
addl $16, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_final_avx1,.-AES_GCM_decrypt_final_avx1
#endif /* WOLFSSL_AESGCM_STREAM */
#endif /* HAVE_INTEL_AVX1 */
#ifdef HAVE_INTEL_AVX2
.text
.globl AES_GCM_encrypt_avx2
.type AES_GCM_encrypt_avx2,@function
.align 16
AES_GCM_encrypt_avx2:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x70, %esp
movl 144(%esp), %esi
movl 168(%esp), %ebp
movl 160(%esp), %edx
vpxor %xmm4, %xmm4, %xmm4
cmpl $12, %edx
je L_AES_GCM_encrypt_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqu (%ebp), %xmm5
vaesenc 16(%ebp), %xmm5, %xmm5
vaesenc 32(%ebp), %xmm5, %xmm5
vaesenc 48(%ebp), %xmm5, %xmm5
vaesenc 64(%ebp), %xmm5, %xmm5
vaesenc 80(%ebp), %xmm5, %xmm5
vaesenc 96(%ebp), %xmm5, %xmm5
vaesenc 112(%ebp), %xmm5, %xmm5
vaesenc 128(%ebp), %xmm5, %xmm5
vaesenc 144(%ebp), %xmm5, %xmm5
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%ebp), %xmm5, %xmm5
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%ebp), %xmm5, %xmm5
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_encrypt_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx2_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx2_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
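# Full 128x128 carry-less multiply: four vpclmulqdq (lo*lo, both cross
# products, hi*hi), with the summed cross products split across the halves.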
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
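# Reduce with two carry-less multiplies by the folding constant
# L_aes_gcm_avx2_mod2_128 instead of the AVX1 shift/xor sequence.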
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_iv_16_loop
movl 160(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx2_calc_iv_done
L_AES_GCM_encrypt_avx2_calc_iv_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_avx2_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_iv_loop
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_encrypt_avx2_calc_iv_done:
# Fold the IV bit length into GHASH to finish J0, then T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm4, %xmm4
# Encrypt counter
vmovdqu (%ebp), %xmm6
vpxor %xmm4, %xmm6, %xmm6
vaesenc 16(%ebp), %xmm6, %xmm6
vaesenc 32(%ebp), %xmm6, %xmm6
vaesenc 48(%ebp), %xmm6, %xmm6
vaesenc 64(%ebp), %xmm6, %xmm6
vaesenc 80(%ebp), %xmm6, %xmm6
vaesenc 96(%ebp), %xmm6, %xmm6
vaesenc 112(%ebp), %xmm6, %xmm6
vaesenc 128(%ebp), %xmm6, %xmm6
vaesenc 144(%ebp), %xmm6, %xmm6
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm6, %xmm6
vaesenc 176(%ebp), %xmm6, %xmm6
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm6, %xmm6
vaesenc 208(%ebp), %xmm6, %xmm6
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm6, %xmm6
jmp L_AES_GCM_encrypt_avx2_iv_done
L_AES_GCM_encrypt_avx2_iv_12:
# Calculate values when IV is 12 bytes
# Set counter based on IV
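# vpblendd takes the three low dwords from the IV and the big-endian one
# from L_avx2_aes_gcm_bswap_one, forming J0 in a single blend.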
vmovdqu L_avx2_aes_gcm_bswap_one, %xmm4
vmovdqu (%ebp), %xmm5
vpblendd $7, (%esi), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqu 16(%ebp), %xmm7
vpxor %xmm5, %xmm4, %xmm6
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm6, %xmm6
vmovdqu 32(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 48(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 64(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 80(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 96(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 112(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 128(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 144(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 176(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 208(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm6, %xmm6
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
L_AES_GCM_encrypt_avx2_iv_done:
vmovdqu %xmm6, 80(%esp)
vpxor %xmm6, %xmm6, %xmm6
movl 140(%esp), %esi
# Additional authentication data
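# The AAD is absorbed into the GHASH accumulator (xmm6) 16 bytes at a
# time; a trailing partial block is copied byte-by-byte into a zeroed
# scratch slot at (%esp) so it is hashed zero-padded.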
movl 156(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_encrypt_avx2_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_encrypt_avx2_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_encrypt_avx2_calc_aad_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_aad_16_loop
movl 156(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_encrypt_avx2_calc_aad_done
L_AES_GCM_encrypt_avx2_calc_aad_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_avx2_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_encrypt_avx2_calc_aad_loop
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_calc_aad_done:
movl 132(%esp), %esi
movl 136(%esp), %edi
# Calculate counter and H
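# H is shifted left one bit with a conditional XOR of the reduction
# polynomial, i.e. H := H * x in GF(2^128). This appears to fold the
# reflected-bit correction (the ghash_mid step above) into the key
# itself, which is why the bulk loops below multiply and reduce with
# no separate shift. The counter (xmm4) is also advanced to the first
# data block here.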
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm4
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 152(%esp)
movl 152(%esp), %eax
jl L_AES_GCM_encrypt_avx2_done_64
andl $0xffffffc0, %eax
vmovdqu %xmm4, 64(%esp)
vmovdqu %xmm6, 96(%esp)
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm3
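# Build the key-power table for 4-block aggregated GHASH: H^1, H^2,
# H^3 and H^4 are stored at (%esp), 16(%esp), 32(%esp) and 48(%esp).
# The squarings need only two vpclmulqdq products plus the two-fold
# reduction; H^3 is a full multiply (ghash_gfmul_red).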
# H ^ 1
vmovdqu %xmm5, (%esp)
vmovdqu %xmm5, %xmm2
# H ^ 2
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm5
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red
vpclmulqdq $16, %xmm0, %xmm2, %xmm6
vpclmulqdq $0x01, %xmm0, %xmm2, %xmm5
vpclmulqdq $0x00, %xmm0, %xmm2, %xmm4
vpxor %xmm5, %xmm6, %xmm6
vpslldq $8, %xmm6, %xmm5
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm2, %xmm1
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm1, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm2
vmovdqu %xmm2, 48(%esp)
vmovdqu 96(%esp), %xmm6
# First 64 bytes of input
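# Four counter blocks are derived with vpaddd (+1..+4), byte-swapped
# to big-endian and encrypted in parallel. Their GHASH is deferred:
# each pass of the main loop below hashes the 64 ciphertext bytes
# produced by the previous pass.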
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_encrypt_avx2_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%esi), %xmm7
vmovdqu 16(%esi), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, (%edi)
vmovdqu %xmm1, 16(%edi)
vmovdqu 32(%esi), %xmm7
vmovdqu 48(%esi), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm2, 32(%edi)
vmovdqu %xmm3, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
movl %esi, %ecx
movl %edi, %edx
jle L_AES_GCM_encrypt_avx2_end_64
# More 64 bytes of input
L_AES_GCM_encrypt_avx2_ghash_64:
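# Main loop: encrypt blocks [ebx, ebx+64) in 4-way CTR mode, then
# GHASH the previous iteration's ciphertext (read back from
# -64(%edx)) against H^4..H^1 with one deferred reduction per 64
# bytes.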
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_encrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
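# Aggregated 4-block GHASH: with ciphertext blocks C0..C3 and running
# hash Y this computes
#   Y' = (Y ^ C0)*H^4 ^ C1*H^3 ^ C2*H^2 ^ C3*H
# accumulating the low, middle and high partial products in xmm6,
# xmm5 and xmm7 and reducing once at the end (aesenc_pclmul_l).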
vmovdqu -64(%edx), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu -48(%edx), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu -32(%edx), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu -16(%edx), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_avx2_ghash_64
L_AES_GCM_encrypt_avx2_end_64:
vmovdqu %xmm6, 96(%esp)
vmovdqu 48(%edx), %xmm3
vmovdqu (%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm4
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm6
vpxor %xmm1, %xmm5, %xmm5
vmovdqu 32(%edx), %xmm3
vmovdqu 16(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 16(%edx), %xmm3
vmovdqu 32(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 96(%esp), %xmm0
vmovdqu (%edx), %xmm3
vmovdqu 48(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpxor %xmm0, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpslldq $8, %xmm5, %xmm7
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm7, %xmm4, %xmm4
vpxor %xmm5, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm4, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vmovdqu (%esp), %xmm5
vmovdqu 64(%esp), %xmm4
L_AES_GCM_encrypt_avx2_done_64:
cmpl 152(%esp), %ebx
je L_AES_GCM_encrypt_avx2_done_enc
movl 152(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_avx2_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_block
vmovdqu %xmm4, %xmm1
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx2_one, %xmm1, %xmm1
vpxor (%ebp), %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm2
jl L_AES_GCM_encrypt_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm2
jl L_AES_GCM_encrypt_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqu 224(%ebp), %xmm2
L_AES_GCM_encrypt_avx2_aesenc_block_aesenc_avx_last:
vaesenclast %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm4
vmovdqu (%ecx), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edx)
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_avx2_last_block_ghash
L_AES_GCM_encrypt_avx2_last_block_start:
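# Remaining full blocks, one at a time: each iteration interleaves
# the AES rounds of the next counter block with the GHASH
# multiply-and-reduce of the running hash (aesenc_gfmul_sb), then
# XORs the keystream with the plaintext and folds the new ciphertext
# into the hash.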
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vmovdqu %xmm4, 64(%esp)
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm2
vpclmulqdq $16, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm4
vpxor (%ebp), %xmm7, %xmm7
vaesenc 16(%ebp), %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%ebp), %xmm7, %xmm7
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 48(%ebp), %xmm7, %xmm7
vaesenc 64(%ebp), %xmm7, %xmm7
vaesenc 80(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 96(%ebp), %xmm7, %xmm7
vaesenc 112(%ebp), %xmm7, %xmm7
vaesenc 128(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%ebp), %xmm7, %xmm7
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vmovdqu 160(%ebp), %xmm0
cmpl $11, 172(%esp)
jl L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%ebp), %xmm7, %xmm7
vmovdqu 192(%ebp), %xmm0
cmpl $13, 172(%esp)
jl L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%ebp), %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm7, %xmm7
vmovdqu (%esi,%ebx,1), %xmm3
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqu %xmm7, (%edi,%ebx,1)
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm7, %xmm7
vpxor %xmm7, %xmm6, %xmm6
vmovdqu 64(%esp), %xmm4
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_avx2_last_block_start
L_AES_GCM_encrypt_avx2_last_block_ghash:
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_last_block_done:
movl 152(%esp), %ecx
movl 152(%esp), %edx
andl $15, %ecx
jz L_AES_GCM_encrypt_avx2_done_enc
# aesenc_last15_enc
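# Final partial block (1..15 bytes): the keystream block is written
# to (%esp), plaintext bytes are XORed against it one at a time, and
# the ciphertext bytes are staged in a zeroed slot at 16(%esp) so the
# zero-padded block can be folded into the GHASH state.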
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm4
vpxor (%ebp), %xmm4, %xmm4
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vaesenc 96(%ebp), %xmm4, %xmm4
vaesenc 112(%ebp), %xmm4, %xmm4
vaesenc 128(%ebp), %xmm4, %xmm4
vaesenc 144(%ebp), %xmm4, %xmm4
cmpl $11, 172(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm0, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 172(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last
vaesenc %xmm0, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_aesenc_avx_last:
vaesenclast %xmm0, %xmm4, %xmm4
xorl %ecx, %ecx
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm4, (%esp)
vmovdqu %xmm0, 16(%esp)
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_loop:
movzbl (%esi,%ebx,1), %eax
xorb (%esp,%ecx,1), %al
movb %al, 16(%esp,%ecx,1)
movb %al, (%edi,%ebx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_loop
L_AES_GCM_encrypt_avx2_aesenc_last15_enc_avx_finish_enc:
vmovdqu 16(%esp), %xmm4
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_encrypt_avx2_done_enc:
vmovdqu 80(%esp), %xmm7
# calc_tag
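# Build the GHASH length block from the ciphertext and AAD byte
# counts: each becomes a 64-bit bit count as (bytes << 3, bytes >> 29)
# inserted with vpinsrd (ciphertext length in the low qword, AAD
# length in the high qword before the final byte swap). The block is
# XORed into the hash, multiplied by H once more, byte-swapped and
# masked with E_K(J0) to form the tag.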
movl 152(%esp), %ecx
shll $3, %ecx
vpinsrd $0x00, %ecx, %xmm0, %xmm0
movl 156(%esp), %ecx
shll $3, %ecx
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 152(%esp), %ecx
shrl $29, %ecx
vpinsrd $0x01, %ecx, %xmm0, %xmm0
movl 156(%esp), %ecx
shrl $29, %ecx
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm4
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm3
vpsrldq $8, %xmm4, %xmm4
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm0, %xmm0
movl 148(%esp), %edi
movl 164(%esp), %ebx
# store_tag
cmpl $16, %ebx
je L_AES_GCM_encrypt_avx2_store_tag_16
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_avx2_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl %ebx, %ecx
jne L_AES_GCM_encrypt_avx2_store_tag_loop
jmp L_AES_GCM_encrypt_avx2_store_tag_done
L_AES_GCM_encrypt_avx2_store_tag_16:
vmovdqu %xmm0, (%edi)
L_AES_GCM_encrypt_avx2_store_tag_done:
addl $0x70, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_avx2,.-AES_GCM_encrypt_avx2
.text
.globl AES_GCM_decrypt_avx2
.type AES_GCM_decrypt_avx2,@function
.align 16
AES_GCM_decrypt_avx2:
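# Stack layout after the prologue (four pushes plus 0xb0 bytes of
# locals), inferred from the offsets used below: 196=in, 200=out,
# 204=aad, 208=iv, 212=tag, 216=nbytes, 220=abytes, 224=ibytes,
# 228=tbytes, 232=key schedule, 236=nr, 240=result pointer.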
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xb0, %esp
movl 208(%esp), %esi
movl 232(%esp), %ebp
vpxor %xmm4, %xmm4, %xmm4
movl 224(%esp), %edx
cmpl $12, %edx
je L_AES_GCM_decrypt_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqu (%ebp), %xmm5
vaesenc 16(%ebp), %xmm5, %xmm5
vaesenc 32(%ebp), %xmm5, %xmm5
vaesenc 48(%ebp), %xmm5, %xmm5
vaesenc 64(%ebp), %xmm5, %xmm5
vaesenc 80(%ebp), %xmm5, %xmm5
vaesenc 96(%ebp), %xmm5, %xmm5
vaesenc 112(%ebp), %xmm5, %xmm5
vaesenc 128(%ebp), %xmm5, %xmm5
vaesenc 144(%ebp), %xmm5, %xmm5
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%ebp), %xmm5, %xmm5
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%ebp), %xmm5, %xmm5
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_decrypt_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx2_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx2_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_iv_16_loop
movl 224(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx2_calc_iv_done
L_AES_GCM_decrypt_avx2_calc_iv_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_decrypt_avx2_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_iv_loop
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_decrypt_avx2_calc_iv_done:
# T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm4, %xmm4
# Encrypt counter
vmovdqu (%ebp), %xmm6
vpxor %xmm4, %xmm6, %xmm6
vaesenc 16(%ebp), %xmm6, %xmm6
vaesenc 32(%ebp), %xmm6, %xmm6
vaesenc 48(%ebp), %xmm6, %xmm6
vaesenc 64(%ebp), %xmm6, %xmm6
vaesenc 80(%ebp), %xmm6, %xmm6
vaesenc 96(%ebp), %xmm6, %xmm6
vaesenc 112(%ebp), %xmm6, %xmm6
vaesenc 128(%ebp), %xmm6, %xmm6
vaesenc 144(%ebp), %xmm6, %xmm6
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm6, %xmm6
vaesenc 176(%ebp), %xmm6, %xmm6
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm6, %xmm6
vaesenc 208(%ebp), %xmm6, %xmm6
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm6, %xmm6
jmp L_AES_GCM_decrypt_avx2_iv_done
L_AES_GCM_decrypt_avx2_iv_12:
# Calculate values when IV is 12 bytes
# Set counter based on IV
vmovdqu L_avx2_aes_gcm_bswap_one, %xmm4
vmovdqu (%ebp), %xmm5
vpblendd $7, (%esi), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqu 16(%ebp), %xmm7
vpxor %xmm5, %xmm4, %xmm6
vaesenc %xmm7, %xmm5, %xmm5
vaesenc %xmm7, %xmm6, %xmm6
vmovdqu 32(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 48(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 64(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 80(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 96(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 112(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 128(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 144(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 176(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_decrypt_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 208(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm6, %xmm6
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_decrypt_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm6, %xmm6
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
L_AES_GCM_decrypt_avx2_iv_done:
vmovdqu %xmm6, 80(%esp)
vpxor %xmm6, %xmm6, %xmm6
movl 204(%esp), %esi
# Additional authentication data
movl 220(%esp), %edx
cmpl $0x00, %edx
je L_AES_GCM_decrypt_avx2_calc_aad_done
xorl %ecx, %ecx
cmpl $16, %edx
jl L_AES_GCM_decrypt_avx2_calc_aad_lt16
andl $0xfffffff0, %edx
L_AES_GCM_decrypt_avx2_calc_aad_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_aad_16_loop
movl 220(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_decrypt_avx2_calc_aad_done
L_AES_GCM_decrypt_avx2_calc_aad_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_decrypt_avx2_calc_aad_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_decrypt_avx2_calc_aad_loop
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
# ghash_gfmul_avx
vpclmulqdq $16, %xmm6, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm6, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm6, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm6, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm7
vpxor %xmm2, %xmm3, %xmm6
# ghash_mid
vpsrld $31, %xmm7, %xmm0
vpsrld $31, %xmm6, %xmm1
vpslld $0x01, %xmm7, %xmm7
vpslld $0x01, %xmm6, %xmm6
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm6, %xmm6
vpor %xmm0, %xmm7, %xmm7
vpor %xmm1, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm7, %xmm0
vpshufd $0x4e, %xmm7, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
L_AES_GCM_decrypt_avx2_calc_aad_done:
movl 196(%esp), %esi
movl 200(%esp), %edi
# Calculate counter and H
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm4
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 216(%esp)
movl 216(%esp), %eax
jl L_AES_GCM_decrypt_avx2_done_64
andl $0xffffffc0, %eax
vmovdqu %xmm4, 64(%esp)
vmovdqu %xmm6, 96(%esp)
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm3
# H ^ 1
vmovdqu %xmm5, (%esp)
vmovdqu %xmm5, %xmm2
# H ^ 2
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm5
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red
vpclmulqdq $16, %xmm0, %xmm2, %xmm6
vpclmulqdq $0x01, %xmm0, %xmm2, %xmm5
vpclmulqdq $0x00, %xmm0, %xmm2, %xmm4
vpxor %xmm5, %xmm6, %xmm6
vpslldq $8, %xmm6, %xmm5
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm2, %xmm1
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm1, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm2
vmovdqu %xmm2, 48(%esp)
vmovdqu 96(%esp), %xmm6
cmpl %esi, %edi
jne L_AES_GCM_decrypt_avx2_ghash_64
L_AES_GCM_decrypt_avx2_ghash_64_inplace:
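# In-place variant (out == in): GHASH must see the ciphertext, but
# storing the plaintext would overwrite it, so each 64-byte group is
# stashed at 112..160(%esp) before the plaintext stores and hashed
# from there. The separate-buffer loop below hashes the ciphertext
# directly from the input.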
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_decrypt_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm7, 112(%esp)
vmovdqu %xmm4, 128(%esp)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm7, 144(%esp)
vmovdqu %xmm4, 160(%esp)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
vmovdqu 112(%esp), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu 128(%esp), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 144(%esp), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 160(%esp), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx2_ghash_64_inplace
jmp L_AES_GCM_decrypt_avx2_ghash_64_done
L_AES_GCM_decrypt_avx2_ghash_64:
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_decrypt_avx2_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm7, (%ecx)
vmovdqu %xmm4, 16(%ecx)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm7, 32(%ecx)
vmovdqu %xmm4, 48(%ecx)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
vmovdqu (%ecx), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu 16(%ecx), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 32(%ecx), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 48(%ecx), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx2_ghash_64
L_AES_GCM_decrypt_avx2_ghash_64_done:
vmovdqu (%esp), %xmm5
vmovdqu 64(%esp), %xmm4
L_AES_GCM_decrypt_avx2_done_64:
cmpl 216(%esp), %ebx
jge L_AES_GCM_decrypt_avx2_done_dec
movl 216(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_avx2_last_block_done
L_AES_GCM_decrypt_avx2_last_block_start:
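# Decrypt tail, one block at a time: the ciphertext block is folded
# into the hash first (xmm4 = Y ^ C), its GHASH multiply is
# interleaved with the AES rounds of the counter block, and the
# keystream is finally XORed with the ciphertext to recover the
# plaintext.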
vmovdqu (%esi,%ebx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vmovdqu %xmm4, 64(%esp)
vpxor %xmm6, %xmm0, %xmm4
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm4, %xmm2
vpclmulqdq $16, %xmm5, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm4
vpxor (%ebp), %xmm7, %xmm7
vaesenc 16(%ebp), %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%ebp), %xmm7, %xmm7
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 48(%ebp), %xmm7, %xmm7
vaesenc 64(%ebp), %xmm7, %xmm7
vaesenc 80(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 96(%ebp), %xmm7, %xmm7
vaesenc 112(%ebp), %xmm7, %xmm7
vaesenc 128(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%ebp), %xmm7, %xmm7
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vmovdqu 160(%ebp), %xmm0
cmpl $11, 236(%esp)
jl L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%ebp), %xmm7, %xmm7
vmovdqu 192(%ebp), %xmm0
cmpl $13, 236(%esp)
jl L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%ebp), %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_decrypt_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm7, %xmm7
vmovdqu (%esi,%ebx,1), %xmm3
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqu %xmm7, (%edi,%ebx,1)
vmovdqu 64(%esp), %xmm4
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_avx2_last_block_start
L_AES_GCM_decrypt_avx2_last_block_done:
movl 216(%esp), %ecx
movl 216(%esp), %edx
andl $15, %ecx
jz L_AES_GCM_decrypt_avx2_done_dec
# aesenc_last15_dec
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm4
vpxor (%ebp), %xmm4, %xmm4
vaesenc 16(%ebp), %xmm4, %xmm4
vaesenc 32(%ebp), %xmm4, %xmm4
vaesenc 48(%ebp), %xmm4, %xmm4
vaesenc 64(%ebp), %xmm4, %xmm4
vaesenc 80(%ebp), %xmm4, %xmm4
vaesenc 96(%ebp), %xmm4, %xmm4
vaesenc 112(%ebp), %xmm4, %xmm4
vaesenc 128(%ebp), %xmm4, %xmm4
vaesenc 144(%ebp), %xmm4, %xmm4
cmpl $11, 236(%esp)
vmovdqu 160(%ebp), %xmm1
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm1, %xmm4, %xmm4
vaesenc 176(%ebp), %xmm4, %xmm4
cmpl $13, 236(%esp)
vmovdqu 192(%ebp), %xmm1
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last
vaesenc %xmm1, %xmm4, %xmm4
vaesenc 208(%ebp), %xmm4, %xmm4
vmovdqu 224(%ebp), %xmm1
L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_aesenc_avx_last:
vaesenclast %xmm1, %xmm4, %xmm4
xorl %ecx, %ecx
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm4, (%esp)
vmovdqu %xmm0, 16(%esp)
L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_loop:
movzbl (%esi,%ebx,1), %eax
movb %al, 16(%esp,%ecx,1)
xorb (%esp,%ecx,1), %al
movb %al, (%edi,%ebx,1)
incl %ebx
incl %ecx
cmpl %edx, %ebx
jl L_AES_GCM_decrypt_avx2_aesenc_last15_dec_avx_loop
vmovdqu 16(%esp), %xmm4
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm4, %xmm4
vpxor %xmm4, %xmm6, %xmm6
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_decrypt_avx2_done_dec:
vmovdqu 80(%esp), %xmm7
# calc_tag
movl 216(%esp), %ecx
shll $3, %ecx
vpinsrd $0x00, %ecx, %xmm0, %xmm0
movl 220(%esp), %ecx
shll $3, %ecx
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 216(%esp), %ecx
shrl $29, %ecx
vpinsrd $0x01, %ecx, %xmm0, %xmm0
movl 220(%esp), %ecx
shrl $29, %ecx
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm4
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
vpslldq $8, %xmm4, %xmm3
vpsrldq $8, %xmm4, %xmm4
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm4, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm7, %xmm0, %xmm0
movl 212(%esp), %edi
movl 228(%esp), %ebx
movl 240(%esp), %ebp
# cmp_tag
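# Tag comparison: the byte loop accumulates XOR differences with
# "orb %al, %cl" so the time taken does not depend on where a
# mismatch occurs; the 16-byte path uses vpcmpeqb/vpmovmskb and
# checks for 0xffff. The 1/0 outcome is written through the result
# pointer.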
cmpl $16, %ebx
je L_AES_GCM_decrypt_avx2_cmp_tag_16
xorl %edx, %edx
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
L_AES_GCM_decrypt_avx2_cmp_tag_loop:
movzbl (%esp,%edx,1), %eax
xorb (%edi,%edx,1), %al
orb %al, %cl
incl %edx
cmpl %ebx, %edx
jne L_AES_GCM_decrypt_avx2_cmp_tag_loop
cmpb $0x00, %cl
sete %cl
jmp L_AES_GCM_decrypt_avx2_cmp_tag_done
L_AES_GCM_decrypt_avx2_cmp_tag_16:
vmovdqu (%edi), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %edx
# %%edx == 0xFFFF then return 1 else => return 0
xorl %ecx, %ecx
cmpl $0xffff, %edx
sete %cl
L_AES_GCM_decrypt_avx2_cmp_tag_done:
movl %ecx, (%ebp)
addl $0xb0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_avx2,.-AES_GCM_decrypt_avx2
#ifdef WOLFSSL_AESGCM_STREAM
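# Streaming-mode entry points follow (init, incremental AAD update,
# and per-block encrypt/GHASH helpers), compiled only when wolfSSL's
# streaming AES-GCM API is enabled.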
.text
.globl AES_GCM_init_avx2
.type AES_GCM_init_avx2,@function
.align 16
AES_GCM_init_avx2:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $32, %esp
movl 52(%esp), %ebp
movl 60(%esp), %esi
movl 76(%esp), %edi
vpxor %xmm4, %xmm4, %xmm4
movl 64(%esp), %edx
cmpl $12, %edx
je L_AES_GCM_init_avx2_iv_12
# Calculate values when IV is not 12 bytes
# H = Encrypt X(=0)
vmovdqu (%ebp), %xmm5
vaesenc 16(%ebp), %xmm5, %xmm5
vaesenc 32(%ebp), %xmm5, %xmm5
vaesenc 48(%ebp), %xmm5, %xmm5
vaesenc 64(%ebp), %xmm5, %xmm5
vaesenc 80(%ebp), %xmm5, %xmm5
vaesenc 96(%ebp), %xmm5, %xmm5
vaesenc 112(%ebp), %xmm5, %xmm5
vaesenc 128(%ebp), %xmm5, %xmm5
vaesenc 144(%ebp), %xmm5, %xmm5
cmpl $11, 56(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 176(%ebp), %xmm5, %xmm5
cmpl $13, 56(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc 208(%ebp), %xmm5, %xmm5
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_init_avx2_calc_iv_1_aesenc_avx_last:
vaesenclast %xmm0, %xmm5, %xmm5
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
# Calc counter
# Initialization vector
cmpl $0x00, %edx
movl $0x00, %ecx
je L_AES_GCM_init_avx2_calc_iv_done
cmpl $16, %edx
jl L_AES_GCM_init_avx2_calc_iv_lt16
andl $0xfffffff0, %edx
L_AES_GCM_init_avx2_calc_iv_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx2_calc_iv_16_loop
movl 64(%esp), %edx
cmpl %edx, %ecx
je L_AES_GCM_init_avx2_calc_iv_done
L_AES_GCM_init_avx2_calc_iv_lt16:
vpxor %xmm0, %xmm0, %xmm0
xorl %ebx, %ebx
vmovdqu %xmm0, (%esp)
L_AES_GCM_init_avx2_calc_iv_loop:
movzbl (%esi,%ecx,1), %eax
movb %al, (%esp,%ebx,1)
incl %ecx
incl %ebx
cmpl %edx, %ecx
jl L_AES_GCM_init_avx2_calc_iv_loop
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
L_AES_GCM_init_avx2_calc_iv_done:
# T = Encrypt counter
vpxor %xmm0, %xmm0, %xmm0
shll $3, %edx
vpinsrd $0x00, %edx, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm4, %xmm4
# Encrypt counter
vmovdqu (%ebp), %xmm7
vpxor %xmm4, %xmm7, %xmm7
vaesenc 16(%ebp), %xmm7, %xmm7
vaesenc 32(%ebp), %xmm7, %xmm7
vaesenc 48(%ebp), %xmm7, %xmm7
vaesenc 64(%ebp), %xmm7, %xmm7
vaesenc 80(%ebp), %xmm7, %xmm7
vaesenc 96(%ebp), %xmm7, %xmm7
vaesenc 112(%ebp), %xmm7, %xmm7
vaesenc 128(%ebp), %xmm7, %xmm7
vaesenc 144(%ebp), %xmm7, %xmm7
cmpl $11, 56(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%ebp), %xmm7, %xmm7
cmpl $13, 56(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%ebp), %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_init_avx2_calc_iv_2_aesenc_avx_last:
vaesenclast %xmm0, %xmm7, %xmm7
jmp L_AES_GCM_init_avx2_iv_done
L_AES_GCM_init_avx2_iv_12:
# Calculate values when IV is 12 bytes
# Set counter based on IV
vmovdqu L_avx2_aes_gcm_bswap_one, %xmm4
vmovdqu (%ebp), %xmm5
vpblendd $7, (%esi), %xmm4, %xmm4
# H = Encrypt X(=0) and T = Encrypt counter
vmovdqu 16(%ebp), %xmm6
vpxor %xmm5, %xmm4, %xmm7
vaesenc %xmm6, %xmm5, %xmm5
vaesenc %xmm6, %xmm7, %xmm7
vmovdqu 32(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 48(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 64(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 80(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 96(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 112(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 128(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 144(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
cmpl $11, 56(%esp)
vmovdqu 160(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 176(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
cmpl $13, 56(%esp)
vmovdqu 192(%ebp), %xmm0
jl L_AES_GCM_init_avx2_calc_iv_12_last
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 208(%ebp), %xmm0
vaesenc %xmm0, %xmm5, %xmm5
vaesenc %xmm0, %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_init_avx2_calc_iv_12_last:
vaesenclast %xmm0, %xmm5, %xmm5
vaesenclast %xmm0, %xmm7, %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm5, %xmm5
L_AES_GCM_init_avx2_iv_done:
vmovdqu %xmm7, (%edi)
movl 68(%esp), %ebp
movl 72(%esp), %edi
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm4
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vmovdqu %xmm5, (%ebp)
vmovdqu %xmm4, (%edi)
addl $32, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_init_avx2,.-AES_GCM_init_avx2
.text
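# AES_GCM_aad_update_avx2
# Absorb additional authenticated data into the GHASH state, one 16-byte
# block per loop iteration. The byte count is assumed here to be a
# multiple of 16; any partial final block is the caller's responsibility.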
.globl AES_GCM_aad_update_avx2
.type AES_GCM_aad_update_avx2,@function
.align 16
AES_GCM_aad_update_avx2:
pushl %esi
pushl %edi
movl 12(%esp), %esi
movl 16(%esp), %edx
movl 20(%esp), %edi
movl 24(%esp), %eax
vmovdqu (%edi), %xmm4
vmovdqu (%eax), %xmm5
xorl %ecx, %ecx
L_AES_GCM_aad_update_avx2_16_loop:
vmovdqu (%esi,%ecx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
addl $16, %ecx
cmpl %edx, %ecx
jl L_AES_GCM_aad_update_avx2_16_loop
vmovdqu %xmm4, (%edi)
popl %edi
popl %esi
ret
.size AES_GCM_aad_update_avx2,.-AES_GCM_aad_update_avx2
.text
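# AES_GCM_encrypt_block_avx2
# Encrypt a single 16-byte block in CTR mode: encrypt the current counter,
# advance the stored counter by one, and XOR the keystream with the input
# block. GHASH of the resulting ciphertext is performed separately.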
.globl AES_GCM_encrypt_block_avx2
.type AES_GCM_encrypt_block_avx2,@function
.align 16
AES_GCM_encrypt_block_avx2:
pushl %esi
pushl %edi
movl 12(%esp), %ecx
movl 16(%esp), %eax
movl 20(%esp), %edi
movl 24(%esp), %esi
movl 28(%esp), %edx
vmovdqu (%edx), %xmm3
# aesenc_block
vmovdqu %xmm3, %xmm1
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx2_one, %xmm1, %xmm1
vpxor (%ecx), %xmm0, %xmm0
vaesenc 16(%ecx), %xmm0, %xmm0
vaesenc 32(%ecx), %xmm0, %xmm0
vaesenc 48(%ecx), %xmm0, %xmm0
vaesenc 64(%ecx), %xmm0, %xmm0
vaesenc 80(%ecx), %xmm0, %xmm0
vaesenc 96(%ecx), %xmm0, %xmm0
vaesenc 112(%ecx), %xmm0, %xmm0
vaesenc 128(%ecx), %xmm0, %xmm0
vaesenc 144(%ecx), %xmm0, %xmm0
cmpl $11, %eax
vmovdqu 160(%ecx), %xmm2
jl L_AES_GCM_encrypt_block_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 176(%ecx), %xmm0, %xmm0
cmpl $13, %eax
vmovdqu 192(%ecx), %xmm2
jl L_AES_GCM_encrypt_block_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 208(%ecx), %xmm0, %xmm0
vmovdqu 224(%ecx), %xmm2
L_AES_GCM_encrypt_block_avx2_aesenc_block_aesenc_avx_last:
vaesenclast %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm3
vmovdqu (%esi), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edi)
vmovdqu %xmm3, (%edx)
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_block_avx2,.-AES_GCM_encrypt_block_avx2
.text
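# AES_GCM_ghash_block_avx2
# Fold one 16-byte block into the GHASH accumulator:
# X = (X xor block) * H in GF(2^128).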
.globl AES_GCM_ghash_block_avx2
.type AES_GCM_ghash_block_avx2,@function
.align 16
AES_GCM_ghash_block_avx2:
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
vmovdqu (%eax), %xmm4
vmovdqu (%ecx), %xmm5
vmovdqu (%edx), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm4, %xmm4
# ghash_gfmul_avx
vpclmulqdq $16, %xmm4, %xmm5, %xmm2
vpclmulqdq $0x01, %xmm4, %xmm5, %xmm1
vpclmulqdq $0x00, %xmm4, %xmm5, %xmm0
vpclmulqdq $0x11, %xmm4, %xmm5, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm1, %xmm0, %xmm6
vpxor %xmm2, %xmm3, %xmm4
# ghash_mid
vpsrld $31, %xmm6, %xmm0
vpsrld $31, %xmm4, %xmm1
vpslld $0x01, %xmm6, %xmm6
vpslld $0x01, %xmm4, %xmm4
vpsrldq $12, %xmm0, %xmm2
vpslldq $4, %xmm0, %xmm0
vpslldq $4, %xmm1, %xmm1
vpor %xmm2, %xmm4, %xmm4
vpor %xmm0, %xmm6, %xmm6
vpor %xmm1, %xmm4, %xmm4
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm6, %xmm0
vpshufd $0x4e, %xmm6, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm4, %xmm4
vmovdqu %xmm4, (%eax)
ret
.size AES_GCM_ghash_block_avx2,.-AES_GCM_ghash_block_avx2
.text
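# AES_GCM_encrypt_update_avx2
# Bulk CTR-mode encryption with interleaved GHASH. Input is processed in
# 64-byte (four block) chunks using the precomputed powers H^1..H^4, and
# any remaining full 16-byte blocks are then handled one at a time.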
.globl AES_GCM_encrypt_update_avx2
.type AES_GCM_encrypt_update_avx2,@function
.align 16
AES_GCM_encrypt_update_avx2:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0x60, %esp
movl 144(%esp), %esi
vmovdqu (%esi), %xmm4
vmovdqu %xmm4, 64(%esp)
movl 136(%esp), %esi
movl 140(%esp), %ebp
vmovdqu (%esi), %xmm6
vmovdqu (%ebp), %xmm5
vmovdqu %xmm6, 80(%esp)
movl 116(%esp), %ebp
movl 124(%esp), %edi
movl 128(%esp), %esi
# Calculate H
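        # (H is shifted left one bit modulo the GCM polynomial so that the
        #  multiply sequences below can omit the per-product one-bit
        #  ghash_mid fix-up used in the init routine)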
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 132(%esp)
movl 132(%esp), %eax
jl L_AES_GCM_encrypt_update_avx2_done_64
andl $0xffffffc0, %eax
vmovdqu %xmm4, 64(%esp)
vmovdqu %xmm6, 80(%esp)
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm3
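        # Precompute H^1..H^4 so that four GHASH block multiplications can
        # be aggregated per 64-byte chunk with a single reduction.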
# H ^ 1
vmovdqu %xmm5, (%esp)
vmovdqu %xmm5, %xmm2
# H ^ 2
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm5
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red
vpclmulqdq $16, %xmm0, %xmm2, %xmm6
vpclmulqdq $0x01, %xmm0, %xmm2, %xmm5
vpclmulqdq $0x00, %xmm0, %xmm2, %xmm4
vpxor %xmm5, %xmm6, %xmm6
vpslldq $8, %xmm6, %xmm5
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm2, %xmm1
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm1, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm2
vmovdqu %xmm2, 48(%esp)
vmovdqu 80(%esp), %xmm6
# First 64 bytes of input
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 120(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 120(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_avx2_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%esi), %xmm7
vmovdqu 16(%esi), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, (%edi)
vmovdqu %xmm1, 16(%edi)
vmovdqu 32(%esi), %xmm7
vmovdqu 48(%esi), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm2, 32(%edi)
vmovdqu %xmm3, 48(%edi)
cmpl $0x40, %eax
movl $0x40, %ebx
movl %esi, %ecx
movl %edi, %edx
jle L_AES_GCM_encrypt_update_avx2_end_64
# More 64 bytes of input
L_AES_GCM_encrypt_update_avx2_ghash_64:
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 120(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 120(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_encrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_encrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
vmovdqu -64(%edx), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu -48(%edx), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu -32(%edx), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu -16(%edx), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_avx2_ghash_64
L_AES_GCM_encrypt_update_avx2_end_64:
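        # GHASH the final 64-byte chunk of ciphertext: the main loop folds
        # each chunk one iteration late (reading at -64(%edx)), so one
        # chunk is still outstanding here.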
vmovdqu %xmm6, 80(%esp)
vmovdqu 48(%edx), %xmm3
vmovdqu (%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm4
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm6
vpxor %xmm1, %xmm5, %xmm5
vmovdqu 32(%edx), %xmm3
vmovdqu 16(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 16(%edx), %xmm3
vmovdqu 32(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vmovdqu 80(%esp), %xmm0
vmovdqu (%edx), %xmm3
vmovdqu 48(%esp), %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm3, %xmm3
vpxor %xmm0, %xmm3, %xmm3
vpclmulqdq $16, %xmm3, %xmm7, %xmm2
vpclmulqdq $0x01, %xmm3, %xmm7, %xmm1
vpclmulqdq $0x00, %xmm3, %xmm7, %xmm0
vpclmulqdq $0x11, %xmm3, %xmm7, %xmm3
vpxor %xmm1, %xmm2, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm0, %xmm4, %xmm4
vpslldq $8, %xmm5, %xmm7
vpsrldq $8, %xmm5, %xmm5
vpxor %xmm7, %xmm4, %xmm4
vpxor %xmm5, %xmm6, %xmm6
# ghash_red
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm2
vpclmulqdq $16, %xmm2, %xmm4, %xmm0
vpshufd $0x4e, %xmm4, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm6, %xmm6
vmovdqu (%esp), %xmm5
vmovdqu 64(%esp), %xmm4
L_AES_GCM_encrypt_update_avx2_done_64:
cmpl 132(%esp), %ebx
je L_AES_GCM_encrypt_update_avx2_done_enc
movl 132(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_avx2_last_block_done
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_block
vmovdqu %xmm4, %xmm1
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm1, %xmm0
vpaddd L_aes_gcm_avx2_one, %xmm1, %xmm1
vpxor (%ebp), %xmm0, %xmm0
vaesenc 16(%ebp), %xmm0, %xmm0
vaesenc 32(%ebp), %xmm0, %xmm0
vaesenc 48(%ebp), %xmm0, %xmm0
vaesenc 64(%ebp), %xmm0, %xmm0
vaesenc 80(%ebp), %xmm0, %xmm0
vaesenc 96(%ebp), %xmm0, %xmm0
vaesenc 112(%ebp), %xmm0, %xmm0
vaesenc 128(%ebp), %xmm0, %xmm0
vaesenc 144(%ebp), %xmm0, %xmm0
cmpl $11, 120(%esp)
vmovdqu 160(%ebp), %xmm2
jl L_AES_GCM_encrypt_update_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 176(%ebp), %xmm0, %xmm0
cmpl $13, 120(%esp)
vmovdqu 192(%ebp), %xmm2
jl L_AES_GCM_encrypt_update_avx2_aesenc_block_aesenc_avx_last
vaesenc %xmm2, %xmm0, %xmm0
vaesenc 208(%ebp), %xmm0, %xmm0
vmovdqu 224(%ebp), %xmm2
L_AES_GCM_encrypt_update_avx2_aesenc_block_aesenc_avx_last:
vaesenclast %xmm2, %xmm0, %xmm0
vmovdqu %xmm1, %xmm4
vmovdqu (%ecx), %xmm1
vpxor %xmm1, %xmm0, %xmm0
vmovdqu %xmm0, (%edx)
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm0, %xmm6, %xmm6
addl $16, %ebx
cmpl %eax, %ebx
jge L_AES_GCM_encrypt_update_avx2_last_block_ghash
L_AES_GCM_encrypt_update_avx2_last_block_start:
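        # Remaining full blocks: interleave the AES rounds for this block's
        # counter with the GHASH multiply of the previously accumulated
        # ciphertext.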
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vmovdqu %xmm4, 64(%esp)
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm2
vpclmulqdq $16, %xmm5, %xmm6, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm4
vpxor (%ebp), %xmm7, %xmm7
vaesenc 16(%ebp), %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%ebp), %xmm7, %xmm7
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 48(%ebp), %xmm7, %xmm7
vaesenc 64(%ebp), %xmm7, %xmm7
vaesenc 80(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 96(%ebp), %xmm7, %xmm7
vaesenc 112(%ebp), %xmm7, %xmm7
vaesenc 128(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%ebp), %xmm7, %xmm7
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vmovdqu 160(%ebp), %xmm0
cmpl $11, 120(%esp)
jl L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%ebp), %xmm7, %xmm7
vmovdqu 192(%ebp), %xmm0
cmpl $13, 120(%esp)
jl L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%ebp), %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_encrypt_update_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm7, %xmm7
vmovdqu (%esi,%ebx,1), %xmm3
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqu %xmm7, (%edi,%ebx,1)
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm7, %xmm7
vpxor %xmm7, %xmm6, %xmm6
vmovdqu 64(%esp), %xmm4
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_encrypt_update_avx2_last_block_start
L_AES_GCM_encrypt_update_avx2_last_block_ghash:
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm6, %xmm2
vpclmulqdq $0x01, %xmm5, %xmm6, %xmm1
vpclmulqdq $0x00, %xmm5, %xmm6, %xmm0
vpxor %xmm1, %xmm2, %xmm2
vpslldq $8, %xmm2, %xmm1
vpsrldq $8, %xmm2, %xmm2
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm6, %xmm6
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm0, %xmm1, %xmm1
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm1, %xmm0
vpshufd $0x4e, %xmm1, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm0, %xmm6, %xmm6
L_AES_GCM_encrypt_update_avx2_last_block_done:
L_AES_GCM_encrypt_update_avx2_done_enc:
movl 136(%esp), %esi
movl 144(%esp), %edi
vmovdqu %xmm6, (%esi)
vmovdqu %xmm4, (%edi)
addl $0x60, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_encrypt_update_avx2,.-AES_GCM_encrypt_update_avx2
.text
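# AES_GCM_encrypt_final_avx2
# Finalize the authentication tag: fold the two 64-bit bit lengths (byte
# lengths shifted left by 3) into GHASH, perform the final multiply by H,
# byte-swap, XOR with the encrypted initial counter block saved by init,
# and write out the requested number of tag bytes.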
.globl AES_GCM_encrypt_final_avx2
.type AES_GCM_encrypt_final_avx2,@function
.align 16
AES_GCM_encrypt_final_avx2:
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 32(%esp), %ebp
movl 52(%esp), %esi
movl 56(%esp), %edi
vmovdqu (%ebp), %xmm4
vmovdqu (%esi), %xmm5
vmovdqu (%edi), %xmm6
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
# calc_tag
movl 44(%esp), %ecx
shll $3, %ecx
vpinsrd $0x00, %ecx, %xmm0, %xmm0
movl 48(%esp), %ecx
shll $3, %ecx
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 44(%esp), %ecx
shrl $29, %ecx
vpinsrd $0x01, %ecx, %xmm0, %xmm0
movl 48(%esp), %ecx
shrl $29, %ecx
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm7
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpslldq $8, %xmm7, %xmm3
vpsrldq $8, %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
movl 36(%esp), %edi
# store_tag
cmpl $16, 40(%esp)
je L_AES_GCM_encrypt_final_avx2_store_tag_16
xorl %ecx, %ecx
vmovdqu %xmm0, (%esp)
L_AES_GCM_encrypt_final_avx2_store_tag_loop:
movzbl (%esp,%ecx,1), %eax
movb %al, (%edi,%ecx,1)
incl %ecx
cmpl 40(%esp), %ecx
jne L_AES_GCM_encrypt_final_avx2_store_tag_loop
jmp L_AES_GCM_encrypt_final_avx2_store_tag_done
L_AES_GCM_encrypt_final_avx2_store_tag_16:
vmovdqu %xmm0, (%edi)
L_AES_GCM_encrypt_final_avx2_store_tag_done:
addl $16, %esp
popl %ebp
popl %edi
popl %esi
ret
.size AES_GCM_encrypt_final_avx2,.-AES_GCM_encrypt_final_avx2
.text
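# AES_GCM_decrypt_update_avx2
# Bulk CTR-mode decryption with interleaved GHASH. Mirrors the encrypt
# update path, except that GHASH runs over the ciphertext (the input), so
# a separate in-place variant saves each ciphertext chunk to the stack
# before it is overwritten with plaintext.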
.globl AES_GCM_decrypt_update_avx2
.type AES_GCM_decrypt_update_avx2,@function
.align 16
AES_GCM_decrypt_update_avx2:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $0xa0, %esp
movl 208(%esp), %esi
vmovdqu (%esi), %xmm4
movl 200(%esp), %esi
movl 204(%esp), %ebp
vmovdqu (%esi), %xmm6
vmovdqu (%ebp), %xmm5
movl 180(%esp), %ebp
movl 188(%esp), %edi
movl 192(%esp), %esi
# Calculate H
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
xorl %ebx, %ebx
cmpl $0x40, 196(%esp)
movl 196(%esp), %eax
jl L_AES_GCM_decrypt_update_avx2_done_64
andl $0xffffffc0, %eax
vmovdqu %xmm4, 64(%esp)
vmovdqu %xmm6, 80(%esp)
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm3
# H ^ 1
vmovdqu %xmm5, (%esp)
vmovdqu %xmm5, %xmm2
# H ^ 2
vpclmulqdq $0x00, %xmm2, %xmm2, %xmm5
vpclmulqdq $0x11, %xmm2, %xmm2, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm0
vmovdqu %xmm0, 16(%esp)
# H ^ 3
# ghash_gfmul_red
vpclmulqdq $16, %xmm0, %xmm2, %xmm6
vpclmulqdq $0x01, %xmm0, %xmm2, %xmm5
vpclmulqdq $0x00, %xmm0, %xmm2, %xmm4
vpxor %xmm5, %xmm6, %xmm6
vpslldq $8, %xmm6, %xmm5
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm2, %xmm1
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm6, %xmm1, %xmm1
vpxor %xmm5, %xmm1, %xmm1
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm1, 32(%esp)
# H ^ 4
vpclmulqdq $0x00, %xmm0, %xmm0, %xmm5
vpclmulqdq $0x11, %xmm0, %xmm0, %xmm6
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpclmulqdq $16, %xmm3, %xmm5, %xmm4
vpshufd $0x4e, %xmm5, %xmm5
vpxor %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm6, %xmm2
vmovdqu %xmm2, 48(%esp)
vmovdqu 80(%esp), %xmm6
cmpl %esi, %edi
jne L_AES_GCM_decrypt_update_avx2_ghash_64
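        # In-place path (out == in): the ciphertext is still needed for
        # GHASH after the plaintext overwrites it, so each 64-byte chunk is
        # first saved to the stack at 96(%esp)..144(%esp).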
L_AES_GCM_decrypt_update_avx2_ghash_64_inplace:
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 184(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 184(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_avx2_inplace_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm7, 96(%esp)
vmovdqu %xmm4, 112(%esp)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm7, 128(%esp)
vmovdqu %xmm4, 144(%esp)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
vmovdqu 96(%esp), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu 112(%esp), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 128(%esp), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 144(%esp), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx2_ghash_64_inplace
jmp L_AES_GCM_decrypt_update_avx2_ghash_64_done
L_AES_GCM_decrypt_update_avx2_ghash_64:
# aesenc_64_ghash
leal (%esi,%ebx,1), %ecx
leal (%edi,%ebx,1), %edx
# aesenc_64
# aesenc_ctr
vmovdqu 64(%esp), %xmm4
vmovdqu L_aes_gcm_avx2_bswap_epi64, %xmm7
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm1
vpshufb %xmm7, %xmm4, %xmm0
vpaddd L_aes_gcm_avx2_two, %xmm4, %xmm2
vpshufb %xmm7, %xmm1, %xmm1
vpaddd L_aes_gcm_avx2_three, %xmm4, %xmm3
vpshufb %xmm7, %xmm2, %xmm2
vpaddd L_aes_gcm_avx2_four, %xmm4, %xmm4
vpshufb %xmm7, %xmm3, %xmm3
# aesenc_xor
vmovdqu (%ebp), %xmm7
vmovdqu %xmm4, 64(%esp)
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm7, %xmm1, %xmm1
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm7, %xmm3, %xmm3
vmovdqu 16(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 32(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 48(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 64(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 80(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 96(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 112(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 128(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 144(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $11, 184(%esp)
vmovdqu 160(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 176(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
cmpl $13, 184(%esp)
vmovdqu 192(%ebp), %xmm7
jl L_AES_GCM_decrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 208(%ebp), %xmm7
vaesenc %xmm7, %xmm0, %xmm0
vaesenc %xmm7, %xmm1, %xmm1
vaesenc %xmm7, %xmm2, %xmm2
vaesenc %xmm7, %xmm3, %xmm3
vmovdqu 224(%ebp), %xmm7
L_AES_GCM_decrypt_update_avx2_aesenc_64_ghash_aesenc_64_enc_done:
# aesenc_last
vaesenclast %xmm7, %xmm0, %xmm0
vaesenclast %xmm7, %xmm1, %xmm1
vaesenclast %xmm7, %xmm2, %xmm2
vaesenclast %xmm7, %xmm3, %xmm3
vmovdqu (%ecx), %xmm7
vmovdqu 16(%ecx), %xmm4
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm4, %xmm1, %xmm1
vmovdqu %xmm7, (%ecx)
vmovdqu %xmm4, 16(%ecx)
vmovdqu %xmm0, (%edx)
vmovdqu %xmm1, 16(%edx)
vmovdqu 32(%ecx), %xmm7
vmovdqu 48(%ecx), %xmm4
vpxor %xmm7, %xmm2, %xmm2
vpxor %xmm4, %xmm3, %xmm3
vmovdqu %xmm7, 32(%ecx)
vmovdqu %xmm4, 48(%ecx)
vmovdqu %xmm2, 32(%edx)
vmovdqu %xmm3, 48(%edx)
# pclmul_1
vmovdqu (%ecx), %xmm1
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vmovdqu 48(%esp), %xmm2
vpxor %xmm6, %xmm1, %xmm1
vpclmulqdq $16, %xmm2, %xmm1, %xmm5
vpclmulqdq $0x01, %xmm2, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm2, %xmm1, %xmm6
vpclmulqdq $0x11, %xmm2, %xmm1, %xmm7
# pclmul_2
vmovdqu 16(%ecx), %xmm1
vmovdqu 32(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 32(%ecx), %xmm1
vmovdqu 16(%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# pclmul_n
vmovdqu 48(%ecx), %xmm1
vmovdqu (%esp), %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm1, %xmm1
vpxor %xmm2, %xmm5, %xmm5
vpclmulqdq $16, %xmm0, %xmm1, %xmm2
vpxor %xmm3, %xmm5, %xmm5
vpclmulqdq $0x01, %xmm0, %xmm1, %xmm3
vpxor %xmm4, %xmm6, %xmm6
vpclmulqdq $0x00, %xmm0, %xmm1, %xmm4
vpclmulqdq $0x11, %xmm0, %xmm1, %xmm1
vpxor %xmm1, %xmm7, %xmm7
# aesenc_pclmul_l
vpxor %xmm2, %xmm5, %xmm5
vpxor %xmm4, %xmm6, %xmm6
vpxor %xmm3, %xmm5, %xmm5
vpslldq $8, %xmm5, %xmm1
vpsrldq $8, %xmm5, %xmm5
vmovdqu L_aes_gcm_avx2_mod2_128, %xmm0
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm0, %xmm6, %xmm3
vpshufd $0x4e, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm7, %xmm6, %xmm6
# aesenc_64_ghash - end
addl $0x40, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx2_ghash_64
L_AES_GCM_decrypt_update_avx2_ghash_64_done:
vmovdqu (%esp), %xmm5
vmovdqu 64(%esp), %xmm4
L_AES_GCM_decrypt_update_avx2_done_64:
cmpl 196(%esp), %ebx
jge L_AES_GCM_decrypt_update_avx2_done_dec
movl 196(%esp), %eax
andl $0xfffffff0, %eax
cmpl %eax, %ebx
jge L_AES_GCM_decrypt_update_avx2_last_block_done
L_AES_GCM_decrypt_update_avx2_last_block_start:
vmovdqu (%esi,%ebx,1), %xmm0
vpshufb L_aes_gcm_avx2_bswap_epi64, %xmm4, %xmm7
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpaddd L_aes_gcm_avx2_one, %xmm4, %xmm4
vmovdqu %xmm4, 64(%esp)
vpxor %xmm6, %xmm0, %xmm4
# aesenc_gfmul_sb
vpclmulqdq $0x01, %xmm5, %xmm4, %xmm2
vpclmulqdq $16, %xmm5, %xmm4, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm4, %xmm1
vpclmulqdq $0x11, %xmm5, %xmm4, %xmm4
vpxor (%ebp), %xmm7, %xmm7
vaesenc 16(%ebp), %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpslldq $8, %xmm3, %xmm2
vpsrldq $8, %xmm3, %xmm3
vaesenc 32(%ebp), %xmm7, %xmm7
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 48(%ebp), %xmm7, %xmm7
vaesenc 64(%ebp), %xmm7, %xmm7
vaesenc 80(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vpxor %xmm1, %xmm2, %xmm2
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm2, %xmm1
vaesenc 96(%ebp), %xmm7, %xmm7
vaesenc 112(%ebp), %xmm7, %xmm7
vaesenc 128(%ebp), %xmm7, %xmm7
vpshufd $0x4e, %xmm2, %xmm2
vaesenc 144(%ebp), %xmm7, %xmm7
vpxor %xmm3, %xmm4, %xmm4
vpxor %xmm4, %xmm2, %xmm2
vmovdqu 160(%ebp), %xmm0
cmpl $11, 184(%esp)
jl L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 176(%ebp), %xmm7, %xmm7
vmovdqu 192(%ebp), %xmm0
cmpl $13, 184(%esp)
jl L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last
vaesenc %xmm0, %xmm7, %xmm7
vaesenc 208(%ebp), %xmm7, %xmm7
vmovdqu 224(%ebp), %xmm0
L_AES_GCM_decrypt_update_avx2_aesenc_gfmul_sb_last:
vaesenclast %xmm0, %xmm7, %xmm7
vmovdqu (%esi,%ebx,1), %xmm3
vpxor %xmm1, %xmm2, %xmm6
vpxor %xmm3, %xmm7, %xmm7
vmovdqu %xmm7, (%edi,%ebx,1)
vmovdqu 64(%esp), %xmm4
addl $16, %ebx
cmpl %eax, %ebx
jl L_AES_GCM_decrypt_update_avx2_last_block_start
L_AES_GCM_decrypt_update_avx2_last_block_done:
L_AES_GCM_decrypt_update_avx2_done_dec:
movl 200(%esp), %esi
movl 208(%esp), %edi
vmovdqu 64(%esp), %xmm4
vmovdqu %xmm6, (%esi)
vmovdqu %xmm4, (%edi)
addl $0xa0, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_update_avx2,.-AES_GCM_decrypt_update_avx2
.text
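# AES_GCM_decrypt_final_avx2
# Recompute the expected authentication tag (the same calculation as the
# encrypt final) and compare it against the provided tag in constant time;
# the result word receives 1 on match and 0 on mismatch.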
.globl AES_GCM_decrypt_final_avx2
.type AES_GCM_decrypt_final_avx2,@function
.align 16
AES_GCM_decrypt_final_avx2:
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
subl $16, %esp
movl 36(%esp), %ebp
movl 56(%esp), %esi
movl 60(%esp), %edi
vmovdqu (%ebp), %xmm4
vmovdqu (%esi), %xmm5
vmovdqu (%edi), %xmm6
vpsrlq $63, %xmm5, %xmm1
vpsllq $0x01, %xmm5, %xmm0
vpslldq $8, %xmm1, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpshufd $0xff, %xmm5, %xmm5
vpsrad $31, %xmm5, %xmm5
vpand L_aes_gcm_avx2_mod2_128, %xmm5, %xmm5
vpxor %xmm0, %xmm5, %xmm5
# calc_tag
movl 48(%esp), %ecx
shll $3, %ecx
vpinsrd $0x00, %ecx, %xmm0, %xmm0
movl 52(%esp), %ecx
shll $3, %ecx
vpinsrd $2, %ecx, %xmm0, %xmm0
movl 48(%esp), %ecx
shrl $29, %ecx
vpinsrd $0x01, %ecx, %xmm0, %xmm0
movl 52(%esp), %ecx
shrl $29, %ecx
vpinsrd $3, %ecx, %xmm0, %xmm0
vpxor %xmm4, %xmm0, %xmm0
# ghash_gfmul_red
vpclmulqdq $16, %xmm5, %xmm0, %xmm7
vpclmulqdq $0x01, %xmm5, %xmm0, %xmm3
vpclmulqdq $0x00, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpslldq $8, %xmm7, %xmm3
vpsrldq $8, %xmm7, %xmm7
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $0x11, %xmm5, %xmm0, %xmm0
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm2, %xmm3, %xmm3
vpclmulqdq $16, L_aes_gcm_avx2_mod2_128, %xmm3, %xmm2
vpshufd $0x4e, %xmm3, %xmm3
vpxor %xmm7, %xmm0, %xmm0
vpxor %xmm3, %xmm0, %xmm0
vpxor %xmm2, %xmm0, %xmm0
vpshufb L_aes_gcm_avx2_bswap_mask, %xmm0, %xmm0
vpxor %xmm6, %xmm0, %xmm0
movl 40(%esp), %esi
movl 64(%esp), %edi
# cmp_tag
cmpl $16, 44(%esp)
je L_AES_GCM_decrypt_final_avx2_cmp_tag_16
xorl %ecx, %ecx
xorl %edx, %edx
vmovdqu %xmm0, (%esp)
L_AES_GCM_decrypt_final_avx2_cmp_tag_loop:
movzbl (%esp,%ecx,1), %eax
xorb (%esi,%ecx,1), %al
orb %al, %dl
incl %ecx
cmpl 44(%esp), %ecx
jne L_AES_GCM_decrypt_final_avx2_cmp_tag_loop
cmpb $0x00, %dl
sete %dl
jmp L_AES_GCM_decrypt_final_avx2_cmp_tag_done
L_AES_GCM_decrypt_final_avx2_cmp_tag_16:
vmovdqu (%esi), %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpmovmskb %xmm0, %ecx
        # set %edx to 1 if all 16 tag bytes matched (%ecx == 0xffff), else 0
xorl %edx, %edx
cmpl $0xffff, %ecx
sete %dl
L_AES_GCM_decrypt_final_avx2_cmp_tag_done:
movl %edx, (%edi)
addl $16, %esp
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
.size AES_GCM_decrypt_final_avx2,.-AES_GCM_decrypt_final_avx2
#endif /* WOLFSSL_AESGCM_STREAM */
#endif /* HAVE_INTEL_AVX2 */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
aegean-odyssey/mpmd_marlin_1.1.x | 11,516 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/IWDG/IWDG_WindowMode/EWARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
aegean-odyssey/mpmd_marlin_1.1.x | 11,444 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Data_Reversing_16bit_CRC/MDK-ARM/startup_stm32f072xb.s |
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
aegean-odyssey/mpmd_marlin_1.1.x | 10,877 | STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Data_Reversing_16bit_CRC/SW4STM32/startup_stm32f072xb.s |
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
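/* These symbols are provided by the linker script. A minimal
   (hypothetical) sketch of the matching script fragment:

       _sidata = LOADADDR(.data);
       .data : { _sdata = .; *(.data*) _edata = .; } >RAM AT> FLASH
       .bss  : { _sbss  = .; *(.bss*)  _ebss  = .; } >RAM

   _estack is typically set to the top of RAM. */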
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 holds the running byte offset; the loop test reloads r0 with
   _sdata (the RAM destination) and r3 with _edata on every pass. */
  movs r1, #0
  b LoopCopyDataInit

CopyDataInit:
  ldr r3, =_sidata
  ldr r3, [r3, r1]        /* load one word from the flash image */
  str r3, [r0, r1]        /* store it at the same offset in RAM */
  adds r1, r1, #4

LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1         /* r2 = next destination address */
  cmp r2, r3
  bcc CopyDataInit        /* keep copying while r2 < _edata */

/* Zero fill the bss segment. */
  ldr r2, =_sbss
  b LoopFillZerobss

FillZerobss:
  movs r3, #0
  str r3, [r2]            /* clear one word */
  adds r2, r2, #4         /* and advance */

LoopFillZerobss:
  ldr r3, =_ebss
  cmp r2, r3
  bcc FillZerobss         /* keep clearing while r2 < _ebss */

/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
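/* Every handler above is only a weak alias of Default_Handler, so an
   application overrides one simply by defining a function with the same
   name anywhere in the program. A minimal C sketch (the handler body is
   hypothetical):

       void TIM2_IRQHandler(void)
       {
           // clear the interrupt flag and handle the event here
       }
*/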
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples/CRC/CRC_Data_Reversing_16bit_CRC/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
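; Note: SystemInit is called before __iar_program_start performs segment
; initialization (copying initializers and zeroing zero-init data), so
; SystemInit must not rely on the values of initialized static variables.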
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
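; Every handler above is published with PUBWEAK in its own REORDER
; section, so the linker discards the weak default whenever the
; application defines a function with the same name. A minimal C
; sketch (the handler body is hypothetical):
;
;     void USB_IRQHandler(void)
;     {
;         // service the USB event here
;     }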
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|