repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
vandercookking/h7_device_RTT
| 2,589
|
rt-thread/libcpu/arm/s3c24x0/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-20 Bernard first version
; */
NOINT EQU 0xc0 ; CPSR I (0x80) | F (0x40) bits: masks both IRQ and FIQ
AREA |.text|, CODE, READONLY, ALIGN=2 ; code section, 2^2 = 4-byte aligned
ARM ; assemble as ARM (32-bit) state code, not Thumb
REQUIRE8 ; callers must provide an 8-byte aligned stack (AAPCS)
PRESERVE8 ; this code preserves 8-byte stack alignment
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
        EXPORT  rt_hw_interrupt_disable
        ; rt_base_t rt_hw_interrupt_disable(void)
        ; Out:  r0 = CPSR value before masking (the "level" the caller saves)
        ; Side: sets the I and F bits, disabling IRQ and FIQ
        MRS     r2, cpsr                ; take a snapshot of the current CPSR
        MOV     r0, r2                  ; return the unmodified value to caller
        ORR     r2, r2, #NOINT          ; set I|F in the working copy
        MSR     cpsr_c, r2              ; write control field: interrupts off
        BX      lr
        ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores the CPSR control field from the value previously returned by
; rt_hw_interrupt_disable (r0 = level). Despite the name this does not
; unconditionally enable interrupts: it restores whatever I/F state was saved.
MSR cpsr_c, r0 ; write saved level back into CPSR control field
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
; r0 = &from_thread->sp, r1 = &to_thread->sp
; Saves the outgoing thread's full context on its own stack, records the
; stack pointer in its TCB, then unstacks and resumes the incoming thread.
; Stack frame (descending): spsr, cpsr, r0-r12, lr, pc.
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
STMFD sp!, {r4} ; push cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push spsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
; NOTE(review): this second write to spsr overwrites the spsr value popped
; just above; the popped cpsr is deliberately placed in SPSR so that the
; LDMFD ...^ below (pc in list) copies SPSR -> CPSR on return. The saved
; spsr value is effectively discarded -- matches upstream RT-Thread, but
; worth confirming against the stack layout used by rt_hw_stack_init.
MSR spsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc; ^ restores CPSR from SPSR
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; void rt_hw_context_switch_to(rt_uint32 to)
; r0 = &to_thread->sp. Used for the first switch at scheduler start:
; there is no outgoing context to save, so it only unstacks the new thread.
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
; NOTE(review): writing CPSR directly here (instead of via SPSR + LDM^)
; switches processor mode before the final LDMFD; this is only safe while
; the popped cpsr keeps the current (SVC) mode's banked sp valid -- confirm
; the initial-thread cpsr built by rt_hw_stack_init stays in SVC mode.
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc (no ^: CPSR already set)
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
; void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
; r0 = &from_thread->sp, r1 = &to_thread->sp
; Called from interrupt context: does not switch immediately. It records
; the pending switch in three globals; the actual context switch is
; performed by the IRQ exit path when it sees the flag set.
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1 ; switch already pending?
BEQ _reswitch ; yes: keep original "from", only update "to"
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 53,898
|
rt-thread/libcpu/arm/s3c24x0/start_rvds.S
|
;/*****************************************************************************/
;/* S3C2440.S: Startup file for Samsung S3C2440 */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2008 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The S3C2440.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * NO_CLOCK_SETUP: when set the startup code will not initialize Clock
; * (used mostly when clock is already initialized from script .ini
; * file).
; *
; * NO_MC_SETUP: when set the startup code will not initialize Memory
; * Controller (used mostly when clock is already initialized from script
; * .ini file).
; *
; * NO_GP_SETUP: when set the startup code will not initialize General Ports
; * (used mostly when clock is already initialized from script .ini
; * file).
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from execution address to on-chip RAM.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
Mode_USR EQU 0x10 ; User mode
Mode_FIQ EQU 0x11 ; Fast Interrupt mode
Mode_IRQ EQU 0x12 ; Interrupt mode
Mode_SVC EQU 0x13 ; Supervisor mode
Mode_ABT EQU 0x17 ; Abort mode
Mode_UND EQU 0x1B ; Undefined-instruction mode
Mode_SYS EQU 0x1F ; System mode
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
;----------------------- Stack and Heap Definitions ----------------------------
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000 ; Undefined mode: no stack allocated
SVC_Stack_Size EQU 0x00000100 ; Supervisor mode: 256 bytes
ABT_Stack_Size EQU 0x00000000 ; Abort mode: no stack allocated
FIQ_Stack_Size EQU 0x00000000 ; FIQ mode: no stack allocated
IRQ_Stack_Size EQU 0x00000100 ; IRQ mode: 256 bytes
USR_Stack_Size EQU 0x00000100 ; User/System mode: 256 bytes
; Total space for all exception-mode stacks (user stack excluded)
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3 ; uninitialized RW section, 8-byte aligned
Stack_Mem SPACE USR_Stack_Size ; user/system stack grows down toward Stack_Mem
__initial_sp SPACE ISR_Stack_Size ; exception-mode stacks start at __initial_sp
Stack_Top ; highest address of the stack region
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000 ; heap disabled (zero-length region)
AREA HEAP, NOINIT, READWRITE, ALIGN=3 ; uninitialized RW section, 8-byte aligned
__heap_base ; start of heap (used by C library retargeting)
Heap_Mem SPACE Heap_Size
__heap_limit ; one past the end of the heap
;----------------------- Memory Definitions ------------------------------------
; Internal Memory Base Addresses
IRAM_BASE EQU 0x40000000
;----------------------- Watchdog Timer Definitions ----------------------------
WT_BASE EQU 0x53000000 ; Watchdog Timer Base Address
WTCON_OFS EQU 0x00 ; Watchdog Timer Control Register Offset
WTDAT_OFS EQU 0x04 ; Watchdog Timer Data Register Offset
WTCNT_OFS EQU 0x08 ; Watchdog Timer Count Register Offset
;// <e> Watchdog Timer Setup
;// <h> Watchdog Timer Control Register (WTCON)
;// <o1.8..15> Prescaler Value <0-255>
;// <o1.5> Watchdog Timer Enable
;// <o1.3..4> Clock Division Factor
;// <0=> 16 <1=> 32 <2=> 64 <3=> 128
;// <o1.2> Interrupt Generation Enable
;// <o1.0> Reset Enable
;// </h>
;// <h> Watchdog Timer Data Register (WTDAT)
;// <o2.0..15> Count Reload Value <0-65535>
;// </h>
;// </e> Watchdog Timer Setup
WT_SETUP EQU 1
WTCON_Val EQU 0x00000000
WTDAT_Val EQU 0x00008000
;----------------------- Clock and Power Management Definitions ----------------
CLOCK_BASE EQU 0x4C000000 ; Clock Base Address
LOCKTIME_OFS EQU 0x00 ; PLL Lock Time Count Register Offset
MPLLCON_OFS EQU 0x04 ; MPLL Configuration Register Offset
UPLLCON_OFS EQU 0x08 ; UPLL Configuration Register Offset
CLKCON_OFS EQU 0x0C ; Clock Generator Control Reg Offset
CLKSLOW_OFS EQU 0x10 ; Clock Slow Control Register Offset
CLKDIVN_OFS EQU 0x14 ; Clock Divider Control Register Offset
CAMDIVN_OFS EQU 0x18 ; Camera Clock Divider Register Offset
;// <e> Clock Setup
;// <h> PLL Lock Time Count Register (LOCKTIME)
;// <o1.16..31> U_LTIME: UPLL Lock Time Count Value for UCLK <0x0-0xFFFF>
;// <o1.0..15> M_LTIME: MPLL Lock Time Count Value for FCLK, HCLK and PCLK <0x0-0xFFFF>
;// </h>
;// <h> MPLL Configuration Register (MPLLCON)
;// <i> MPLL = (2 * m * Fin) / (p * 2^s)
;// <o2.12..19> m: Main Divider m Value <9-256><#-8>
;// <i> m = MDIV + 8
;// <o2.4..9> p: Pre-divider p Value <3-64><#-2>
;// <i> p = PDIV + 2
;// <o2.0..1> s: Post Divider s Value <0-3>
;// <i> s = SDIV
;// </h>
;// <h> UPLL Configuration Register (UPLLCON)
;// <i> UPLL = ( m * Fin) / (p * 2^s)
;// <o3.12..19> m: Main Divider m Value <8-263><#-8>
;// <i> m = MDIV + 8
;// <o3.4..9> p: Pre-divider p Value <2-65><#-2>
;// <i> p = PDIV + 2
;// <o3.0..1> s: Post Divider s Value <0-3>
;// <i> s = SDIV
;// </h>
;// <h> Clock Generation Control Register (CLKCON)
;// <o4.20> AC97 Enable
;// <o4.19> Camera Enable
;// <o4.18> SPI Enable
;// <o4.17> IIS Enable
;// <o4.16> IIC Enable
;// <o4.15> ADC + Touch Screen Enable
;// <o4.14> RTC Enable
;// <o4.13> GPIO Enable
;// <o4.12> UART2 Enable
;// <o4.11> UART1 Enable
;// <o4.10> UART0 Enable
;// <o4.9> SDI Enable
;// <o4.8> PWMTIMER Enable
;// <o4.7> USB Device Enable
;// <o4.6> USB Host Enable
;// <o4.5> LCDC Enable
;// <o4.4> NAND FLASH Controller Enable
;// <o4.3> SLEEP Enable
;// <o4.2> IDLE BIT Enable
;// </h>
;// <h> Clock Slow Control Register (CLKSLOW)
;// <o5.7> UCLK_ON: UCLK ON
;// <o5.5> MPLL_OFF: Turn off PLL
;// <o5.4> SLOW_BIT: Slow Mode Enable
;// <o5.0..2> SLOW_VAL: Slow Clock Divider <0-7>
;// </h>
;// <h> Clock Divider Control Register (CLKDIVN)
;// <o6.3> DIVN_UPLL: UCLK Select
;// <0=> UCLK = UPLL clock
;// <1=> UCLK = UPLL clock / 2
;// <o6.1..2> HDIVN: HCLK Select
;// <0=> HCLK = FCLK
;// <1=> HCLK = FCLK / 2
;// <2=> HCLK = FCLK / 4 if HCLK4_HALF = 0 in CAMDIVN, else HCLK = FCLK / 8
;// <3=> HCLK = FCLK / 3 if HCLK3_HALF = 0 in CAMDIVN, else HCLK = FCLK / 6
;// <o6.0> PDIVN: PCLK Select
;// <0=> PCLK = HCLK
;// <1=> PCLK = HCLK / 2
;// </h>
;// <h> Camera Clock Divider Control Register (CAMDIVN)
;// <o7.12> DVS_EN: ARM Core Clock Select
;// <0=> ARM core runs at FCLK
;// <1=> ARM core runs at HCLK
;// <o7.9> HCLK4_HALF: HDIVN Division Rate Change Bit
;// <0=> If HDIVN = 2 in CLKDIVN then HCLK = FCLK / 4
;// <1=> If HDIVN = 2 in CLKDIVN then HCLK = FCLK / 8
;// <o7.8> HCLK3_HALF: HDIVN Division Rate Change Bit
;// <0=> If HDIVN = 3 in CLKDIVN then HCLK = FCLK / 3
;// <1=> If HDIVN = 3 in CLKDIVN then HCLK = FCLK / 6
;// <o7.4> CAMCLK Select
;// <0=> CAMCLK = UPLL
;// <1=> CAMCLK = UPLL / CAMCLK_DIV
;// <o7.0..3> CAMCLK_DIV: CAMCLK Divider <0-15>
;// <i> Camera Clock = UPLL / (2 * (CAMCLK_DIV + 1))
;// <i> Divider is used only if CAMCLK_SEL = 1
;// </h>
;// </e> Clock Setup
CLOCK_SETUP EQU 0
LOCKTIME_Val EQU 0x0FFF0FFF
MPLLCON_Val EQU 0x00043011
UPLLCON_Val EQU 0x00038021
CLKCON_Val EQU 0x001FFFF0
CLKSLOW_Val EQU 0x00000004
CLKDIVN_Val EQU 0x0000000F
CAMDIVN_Val EQU 0x00000000
;----------------------- Memory Controller Definitions -------------------------
MC_BASE EQU 0x48000000 ; Memory Controller Base Address
BWSCON_OFS EQU 0x00 ; Bus Width and Wait Status Ctrl Offset
BANKCON0_OFS EQU 0x04 ; Bank 0 Control Register Offset
BANKCON1_OFS EQU 0x08 ; Bank 1 Control Register Offset
BANKCON2_OFS EQU 0x0C ; Bank 2 Control Register Offset
BANKCON3_OFS EQU 0x10 ; Bank 3 Control Register Offset
BANKCON4_OFS EQU 0x14 ; Bank 4 Control Register Offset
BANKCON5_OFS EQU 0x18 ; Bank 5 Control Register Offset
BANKCON6_OFS EQU 0x1C ; Bank 6 Control Register Offset
BANKCON7_OFS EQU 0x20 ; Bank 7 Control Register Offset
REFRESH_OFS EQU 0x24 ; SDRAM Refresh Control Register Offset
BANKSIZE_OFS EQU 0x28 ; Flexible Bank Size Register Offset
MRSRB6_OFS EQU 0x2C ; Bank 6 Mode Register Offset
MRSRB7_OFS EQU 0x30 ; Bank 7 Mode Register Offset
;// <e> Memory Controller Setup
;// <h> Bus Width and Wait Control Register (BWSCON)
;// <o1.31> ST7: Use UB/LB for Bank 7
;// <o1.30> WS7: Enable Wait Status for Bank 7
;// <o1.28..29> DW7: Data Bus Width for Bank 7
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.27> ST6: Use UB/LB for Bank 6
;// <o1.26> WS6: Enable Wait Status for Bank 6
;// <o1.24..25> DW6: Data Bus Width for Bank 6
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.23> ST5: Use UB/LB for Bank 5
;// <o1.22> WS5: Enable Wait Status for Bank 5
;// <o1.20..21> DW5: Data Bus Width for Bank 5
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.19> ST4: Use UB/LB for Bank 4
;// <o1.18> WS4: Enable Wait Status for Bank 4
;// <o1.16..17> DW4: Data Bus Width for Bank 4
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.15> ST3: Use UB/LB for Bank 3
;// <o1.14> WS3: Enable Wait Status for Bank 3
;// <o1.12..13> DW3: Data Bus Width for Bank 3
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.11> ST2: Use UB/LB for Bank 2
;// <o1.10> WS2: Enable Wait Status for Bank 2
;// <o1.8..9> DW2: Data Bus Width for Bank 2
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.7> ST1: Use UB/LB for Bank 1
;// <o1.6> WS1: Enable Wait Status for Bank 1
;// <o1.4..5> DW1: Data Bus Width for Bank 1
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Reserved
;// <o1.1..2> DW0: Indicate Data Bus Width for Bank 0
;// <1=> 16-bit <2=> 32-bit
;// </h>
;// <h> Bank 0 Control Register (BANKCON0)
;// <o2.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o2.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o2.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o2.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o2.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o2.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o2.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 1 Control Register (BANKCON1)
;// <o3.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o3.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o3.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o3.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o3.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o3.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o3.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 2 Control Register (BANKCON2)
;// <o4.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o4.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o4.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o4.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o4.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o4.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o4.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 3 Control Register (BANKCON3)
;// <o5.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o5.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o5.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o5.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o5.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o5.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o5.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 4 Control Register (BANKCON4)
;// <o6.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o6.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o6.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o6.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o6.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o6.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o6.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 5 Control Register (BANKCON5)
;// <o7.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o7.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o7.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o7.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o7.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o7.2..3> Tacp: Page Mode Access Cycle at Page Mode
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o7.0..1> PMC: Page Mode Configuration
;// <0=> normal (1 data) <1=> 4 data <2=> 8 data <3=> 16 data
;// </h>
;// <h> Bank 6 Control Register (BANKCON6)
;// <o8.15..16> Memory Type Selection
;// <0=> ROM or SRAM <3=> SDRAM
;// <o8.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o8.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o8.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o8.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o8.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o8.2..3> Tacp/Trcd: Page Mode Access Cycle at Page Mode / RAS to CAS Delay
;// <i> Parameter depends on Memory Type: if type SRAM then parameter is Tacp,
;// <i> if type is SDRAM then parameter is Trcd
;// <i> For SDRAM 6 cycles setting is not allowed
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o8.0..1> PMC/SCAN: Page Mode Configuration / Column Address Number <0-3>
;// <i> Parameter depends on Memory Type: if type SRAM then parameter is PMC,
;// <i> if type is SDRAM then parameter is SCAN
;// </h>
;// <h> Bank 7 Control Register (BANKCON7)
;// <o9.15..16> Memory Type Selection
;// <0=> ROM or SRAM <3=> SDRAM
;// <o9.13..14> Tacs: Address Set-up Time before nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o9.11..12> Tcos: Chip Selection Set-up Time before nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o9.8..10> Tacc: Access Cycle
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks <3=> 4 clocks
;// <4=> 6 clocks <5=> 8 clocks <6=> 10 clocks <7=> 14 clocks
;// <o9.6..7> Tcoh: Chip Selection Hold Time after nOE
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o9.4..5> Tcah: Address Hold Time after nGCS
;// <0=> 0 clocks <1=> 1 clocks <2=> 2 clocks <3=> 4 clocks
;// <o9.2..3> Tacp/Trcd: Page Mode Access Cycle at Page Mode / RAS to CAS Delay
;// <i> Parameter depends on Memory Type: if type SRAM then parameter is Tacp,
;// <i> if type is SDRAM then parameter is Trcd
;// <i> For SDRAM 6 cycles setting is not allowed
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> 6 clocks
;// <o9.0..1> PMC/SCAN: Page Mode Configuration / Column Address Number <0-3>
;// <i> Parameter depends on Memory Type: if type SRAM then parameter is PMC,
;// <i> if type is SDRAM then parameter is SCAN
;// </h>
;// <h> SDRAM Refresh Control Register (REFRESH)
;// <o10.23> REFEN: SDRAM Refresh Enable
;// <o10.22> TREFMD: SDRAM Refresh Mode
;// <0=> CBR/Auto Refresh <1=> Self Refresh
;// <o10.20..21> Trp: SDRAM RAS Pre-charge Time
;// <0=> 2 clocks <1=> 3 clocks <2=> 4 clocks <3=> Reserved
;// <o10.18..19> Tsrc: SDRAM Semi Row Cycle Time
;// <i> SDRAM Row cycle time: Trc = Tsrc + Trp
;// <0=> 4 clocks <1=> 5 clocks <2=> 6 clocks <3=> 7 clocks
;// <o10.0..10> Refresh Counter <0-1023>
;// <i> Refresh Period = (2048 - Refresh Count + 1) / HCLK
;// </h>
;// <h> Flexible Bank Size Register (BANKSIZE)
;// <o11.7> BURST_EN: ARM Core Burst Operation Enable
;// <o11.5> SCKE_EN: SDRAM Power Down Mode Enable
;// <o11.4> SCLK_EN: SCLK Enabled During SDRAM Access Cycle
;// <0=> SCLK is always active <1=> SCLK is active only during the access
;// <o11.0..2> BK76MAP: BANK6 and BANK7 Memory Map
;// <0=> 32MB / 32MB <1=> 64MB / 64MB <2=> 128MB / 128MB
;// <4=> 2MB / 2MB <5=> 4MB / 4MB <6=> 8MB / 8MB <7=> 16MB / 16MB
;// <o11.0..10> Refresh Counter <0-1023>
;// <i> Refresh Period = (2048 - Refresh Count + 1) / HCLK
;// </h>
;// <h> SDRAM Mode Register Set Register 6 (MRSRB6)
;// <o12.7> WBL: Write Burst Length
;// <0=> Burst (Fixed)
;// <o12.7..8> TM: Test Mode
;// <0=> Mode register set (Fixed)
;// <o12.4..6> CL: CAS Latency
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks
;// <o12.3> BT: Burst Type
;// <0=> Sequential (Fixed)
;// <o12.0..2> BL: Burst Length
;// <0=> 1 (Fixed)
;// </h>
;// <h> SDRAM Mode Register Set Register 7 (MRSRB7)
;// <o13.7> WBL: Write Burst Length
;// <0=> Burst (Fixed)
;// <o13.7..8> TM: Test Mode
;// <0=> Mode register set (Fixed)
;// <o13.4..6> CL: CAS Latency
;// <0=> 1 clocks <1=> 2 clocks <2=> 3 clocks
;// <o13.3> BT: Burst Type
;// <0=> Sequential (Fixed)
;// <o13.0..2> BL: Burst Length
;// <0=> 1 (Fixed)
;// </h>
;// </e> Memory Controller Setup
MC_SETUP EQU 0
BWSCON_Val EQU 0x22000000
BANKCON0_Val EQU 0x00000700
BANKCON1_Val EQU 0x00000700
BANKCON2_Val EQU 0x00000700
BANKCON3_Val EQU 0x00000700
BANKCON4_Val EQU 0x00000700
BANKCON5_Val EQU 0x00000700
BANKCON6_Val EQU 0x00018005
BANKCON7_Val EQU 0x00018005
REFRESH_Val EQU 0x008404F3
BANKSIZE_Val EQU 0x00000032
MRSRB6_Val EQU 0x00000020
MRSRB7_Val EQU 0x00000020
;----------------------- I/O Port Definitions ----------------------------------
GPA_BASE EQU 0x56000000 ; GPA Base Address
GPB_BASE EQU 0x56000010 ; GPB Base Address
GPC_BASE EQU 0x56000020 ; GPC Base Address
GPD_BASE EQU 0x56000030 ; GPD Base Address
GPE_BASE EQU 0x56000040 ; GPE Base Address
GPF_BASE EQU 0x56000050 ; GPF Base Address
GPG_BASE EQU 0x56000060 ; GPG Base Address
GPH_BASE EQU 0x56000070 ; GPH Base Address
GPJ_BASE EQU 0x560000D0 ; GPJ Base Address
GPCON_OFS EQU 0x00 ; Control Register Offset
GPDAT_OFS EQU 0x04 ; Data Register Offset
GPUP_OFS EQU 0x08 ; Pull-up Disable Register Offset
;// <e> I/O Setup
GP_SETUP EQU 1
;// <e> Port A Settings
;// <h> Port A Control Register (GPACON)
;// <o1.22> GPA22 <0=> Output <1=> nFCE
;// <o1.21> GPA21 <0=> Output <1=> nRSTOUT
;// <o1.20> GPA20 <0=> Output <1=> nFRE
;// <o1.19> GPA19 <0=> Output <1=> nFWE
;// <o1.18> GPA18 <0=> Output <1=> ALE
;// <o1.17> GPA17 <0=> Output <1=> CLE
;// <o1.16> GPA16 <0=> Output <1=> nGCS[5]
;// <o1.15> GPA15 <0=> Output <1=> nGCS[4]
;// <o1.14> GPA14 <0=> Output <1=> nGCS[3]
;// <o1.13> GPA13 <0=> Output <1=> nGCS[2]
;// <o1.12> GPA12 <0=> Output <1=> nGCS[1]
;// <o1.11> GPA11 <0=> Output <1=> ADDR26
;// <o1.10> GPA10 <0=> Output <1=> ADDR25
;// <o1.9> GPA9 <0=> Output <1=> ADDR24
;// <o1.8> GPA8 <0=> Output <1=> ADDR23
;// <o1.7> GPA7 <0=> Output <1=> ADDR22
;// <o1.6> GPA6 <0=> Output <1=> ADDR21
;// <o1.5> GPA5 <0=> Output <1=> ADDR20
;// <o1.4> GPA4 <0=> Output <1=> ADDR19
;// <o1.3> GPA3 <0=> Output <1=> ADDR18
;// <o1.2> GPA2 <0=> Output <1=> ADDR17
;// <o1.1> GPA1 <0=> Output <1=> ADDR16
;// <o1.0> GPA0 <0=> Output <1=> ADDR0
;// </h>
;// </e>
GPA_SETUP EQU 0
GPACON_Val EQU 0x000003FF
;// <e> Port B Settings
;// <h> Port B Control Register (GPBCON)
;// <o1.20..21> GPB10 <0=> Input <1=> Output <2=> nXDREQ0 <3=> Reserved
;// <o1.18..19> GPB9 <0=> Input <1=> Output <2=> nXDACK0 <3=> Reserved
;// <o1.16..17> GPB8 <0=> Input <1=> Output <2=> nXDREQ1 <3=> Reserved
;// <o1.14..15> GPB7 <0=> Input <1=> Output <2=> nXDACK1 <3=> Reserved
;// <o1.12..13> GPB6 <0=> Input <1=> Output <2=> nXBREQ <3=> Reserved
;// <o1.10..11> GPB5 <0=> Input <1=> Output <2=> nXBACK <3=> Reserved
;// <o1.8..9> GPB4 <0=> Input <1=> Output <2=> TCLK[0] <3=> Reserved
;// <o1.6..7> GPB3 <0=> Input <1=> Output <2=> TOUT3 <3=> Reserved
;// <o1.4..5> GPB2 <0=> Input <1=> Output <2=> TOUT2 <3=> Reserved
;// <o1.2..3> GPB1 <0=> Input <1=> Output <2=> TOUT1 <3=> Reserved
;// <o1.0..1> GPB0 <0=> Input <1=> Output <2=> TOUT0 <3=> Reserved
;// </h>
;// <h> Port B Pull-up Settings Register (GPBUP)
;// <o2.10> GPB10 Pull-up Disable
;// <o2.9> GPB9 Pull-up Disable
;// <o2.8> GPB8 Pull-up Disable
;// <o2.7> GPB7 Pull-up Disable
;// <o2.6> GPB6 Pull-up Disable
;// <o2.5> GPB5 Pull-up Disable
;// <o2.4> GPB4 Pull-up Disable
;// <o2.3> GPB3 Pull-up Disable
;// <o2.2> GPB2 Pull-up Disable
;// <o2.1> GPB1 Pull-up Disable
;// <o2.0> GPB0 Pull-up Disable
;// </h>
;// </e>
GPB_SETUP EQU 0
GPBCON_Val EQU 0x00000000
GPBUP_Val EQU 0x00000000
;// <e> Port C Settings
;// <h> Port C Control Register (GPCCON)
;// <o1.30..31> GPC15 <0=> Input <1=> Output <2=> VD[7] <3=> Reserved
;// <o1.28..29> GPC14 <0=> Input <1=> Output <2=> VD[6] <3=> Reserved
;// <o1.26..27> GPC13 <0=> Input <1=> Output <2=> VD[5] <3=> Reserved
;// <o1.24..25> GPC12 <0=> Input <1=> Output <2=> VD[4] <3=> Reserved
;// <o1.22..23> GPC11 <0=> Input <1=> Output <2=> VD[3] <3=> Reserved
;// <o1.20..21> GPC10 <0=> Input <1=> Output <2=> VD[2] <3=> Reserved
;// <o1.18..19> GPC9 <0=> Input <1=> Output <2=> VD[1] <3=> Reserved
;// <o1.16..17> GPC8 <0=> Input <1=> Output <2=> VD[0] <3=> Reserved
;// <o1.14..15> GPC7 <0=> Input <1=> Output <2=> LCD_LPCREVB <3=> Reserved
;// <o1.12..13> GPC6 <0=> Input <1=> Output <2=> LCD_LPCREV <3=> Reserved
;// <o1.10..11> GPC5 <0=> Input <1=> Output <2=> LCD_LPCOE <3=> Reserved
;// <o1.8..9> GPC4 <0=> Input <1=> Output <2=> VM <3=> I2SSDI
;// <o1.6..7> GPC3 <0=> Input <1=> Output <2=> VFRAME <3=> Reserved
;// <o1.4..5> GPC2 <0=> Input <1=> Output <2=> VLINE <3=> Reserved
;// <o1.2..3> GPC1 <0=> Input <1=> Output <2=> VCLK <3=> Reserved
;// <o1.0..1> GPC0 <0=> Input <1=> Output <2=> LEND <3=> Reserved
;// </h>
;// <h> Port C Pull-up Settings Register (GPCUP)
;// <o2.15> GPC15 Pull-up Disable
;// <o2.14> GPC14 Pull-up Disable
;// <o2.13> GPC13 Pull-up Disable
;// <o2.12> GPC12 Pull-up Disable
;// <o2.11> GPC11 Pull-up Disable
;// <o2.10> GPC10 Pull-up Disable
;// <o2.9> GPC9 Pull-up Disable
;// <o2.8> GPC8 Pull-up Disable
;// <o2.7> GPC7 Pull-up Disable
;// <o2.6> GPC6 Pull-up Disable
;// <o2.5> GPC5 Pull-up Disable
;// <o2.4> GPC4 Pull-up Disable
;// <o2.3> GPC3 Pull-up Disable
;// <o2.2> GPC2 Pull-up Disable
;// <o2.1> GPC1 Pull-up Disable
;// <o2.0> GPC0 Pull-up Disable
;// </h>
;// </e>
GPC_SETUP EQU 0
GPCCON_Val EQU 0x00000000
GPCUP_Val EQU 0x00000000
;// <e> Port D Settings
;// <h> Port D Control Register (GPDCON)
;// <o1.30..31> GPD15 <0=> Input <1=> Output <2=> VD[23] <3=> nSS0
;// <o1.28..29> GPD14 <0=> Input <1=> Output <2=> VD[22] <3=> nSS1
;// <o1.26..27> GPD13 <0=> Input <1=> Output <2=> VD[21] <3=> Reserved
;// <o1.24..25> GPD12 <0=> Input <1=> Output <2=> VD[20] <3=> Reserved
;// <o1.22..23> GPD11 <0=> Input <1=> Output <2=> VD[19] <3=> Reserved
;// <o1.20..21> GPD10 <0=> Input <1=> Output <2=> VD[18] <3=> SPICLK1
;// <o1.18..19> GPD9 <0=> Input <1=> Output <2=> VD[17] <3=> SPIMOSI1
;// <o1.16..17> GPD8 <0=> Input <1=> Output <2=> VD[16] <3=> SPIMISO1
;// <o1.14..15> GPD7 <0=> Input <1=> Output <2=> VD[15] <3=> Reserved
;// <o1.12..13> GPD6 <0=> Input <1=> Output <2=> VD[14] <3=> Reserved
;// <o1.10..11> GPD5 <0=> Input <1=> Output <2=> VD[13] <3=> Reserved
;// <o1.8..9> GPD4 <0=> Input <1=> Output <2=> VD[12] <3=> Reserved
;// <o1.6..7> GPD3 <0=> Input <1=> Output <2=> VD[11] <3=> Reserved
;// <o1.4..5> GPD2 <0=> Input <1=> Output <2=> VD[10] <3=> Reserved
;// <o1.2..3> GPD1 <0=> Input <1=> Output <2=> VD[9] <3=> Reserved
;// <o1.0..1> GPD0 <0=> Input <1=> Output <2=> VD[8] <3=> Reserved
;// </h>
;// <h> Port D Pull-up Settings Register (GPDUP)
;// <o2.15> GPD15 Pull-up Disable
;// <o2.14> GPD14 Pull-up Disable
;// <o2.13> GPD13 Pull-up Disable
;// <o2.12> GPD12 Pull-up Disable
;// <o2.11> GPD11 Pull-up Disable
;// <o2.10> GPD10 Pull-up Disable
;// <o2.9> GPD9 Pull-up Disable
;// <o2.8> GPD8 Pull-up Disable
;// <o2.7> GPD7 Pull-up Disable
;// <o2.6> GPD6 Pull-up Disable
;// <o2.5> GPD5 Pull-up Disable
;// <o2.4> GPD4 Pull-up Disable
;// <o2.3> GPD3 Pull-up Disable
;// <o2.2> GPD2 Pull-up Disable
;// <o2.1> GPD1 Pull-up Disable
;// <o2.0> GPD0 Pull-up Disable
;// </h>
;// </e>
GPD_SETUP EQU 0
GPDCON_Val EQU 0x00000000
GPDUP_Val EQU 0x00000000
;// <e> Port E Settings
;// <h> Port E Control Register (GPECON)
;// <o1.30..31> GPE15 <0=> Input <1=> Output <2=> IICSDA <3=> Reserved
;// <i> This pad is open-drain, and has no pull-up option.
;// <o1.28..29> GPE14 <0=> Input <1=> Output <2=> IICSCL <3=> Reserved
;// <i> This pad is open-drain, and has no pull-up option.
;// <o1.26..27> GPE13 <0=> Input <1=> Output <2=> SPICLK0 <3=> Reserved
;// <o1.24..25> GPE12 <0=> Input <1=> Output <2=> SPIMOSI0 <3=> Reserved
;// <o1.22..23> GPE11 <0=> Input <1=> Output <2=> SPIMISO0 <3=> Reserved
;// <o1.20..21> GPE10 <0=> Input <1=> Output <2=> SDDAT3 <3=> Reserved
;// <o1.18..19> GPE9 <0=> Input <1=> Output <2=> SDDAT2 <3=> Reserved
;// <o1.16..17> GPE8 <0=> Input <1=> Output <2=> SDDAT1 <3=> Reserved
;// <o1.14..15> GPE7 <0=> Input <1=> Output <2=> SDDAT0 <3=> Reserved
;// <o1.12..13> GPE6 <0=> Input <1=> Output <2=> SDCMD <3=> Reserved
;// <o1.10..11> GPE5 <0=> Input <1=> Output <2=> SDCLK <3=> Reserved
;// <o1.8..9> GPE4 <0=> Input <1=> Output <2=> I2SDO <3=> AC_SDATA_OUT
;// <o1.6..7> GPE3 <0=> Input <1=> Output <2=> I2SDI <3=> AC_SDATA_IN
;// <o1.4..5> GPE2 <0=> Input <1=> Output <2=> CDCLK <3=> AC_nRESET
;// <o1.2..3> GPE1 <0=> Input <1=> Output <2=> I2SSCLK <3=> AC_BIT_CLK
;// <o1.0..1> GPE0 <0=> Input <1=> Output <2=> I2SLRCK <3=> AC_SYNC
;// </h>
;// <h> Port E Pull-up Settings Register (GPEUP)
;// <o2.13> GPE13 Pull-up Disable
;// <o2.12> GPE12 Pull-up Disable
;// <o2.11> GPE11 Pull-up Disable
;// <o2.10> GPE10 Pull-up Disable
;// <o2.9> GPE9 Pull-up Disable
;// <o2.8> GPE8 Pull-up Disable
;// <o2.7> GPE7 Pull-up Disable
;// <o2.6> GPE6 Pull-up Disable
;// <o2.5> GPE5 Pull-up Disable
;// <o2.4> GPE4 Pull-up Disable
;// <o2.3> GPE3 Pull-up Disable
;// <o2.2> GPE2 Pull-up Disable
;// <o2.1> GPE1 Pull-up Disable
;// <o2.0> GPE0 Pull-up Disable
;// </h>
;// </e>
GPE_SETUP EQU 0
GPECON_Val EQU 0x00000000
GPEUP_Val EQU 0x00000000
;// <e> Port F Settings
;// <h> Port F Control Register (GPFCON)
;// <o1.14..15> GPF7 <0=> Input <1=> Output <2=> EINT[7] <3=> Reserved
;// <o1.12..13> GPF6 <0=> Input <1=> Output <2=> EINT[6] <3=> Reserved
;// <o1.10..11> GPF5 <0=> Input <1=> Output <2=> EINT[5] <3=> Reserved
;// <o1.8..9> GPF4 <0=> Input <1=> Output <2=> EINT[4] <3=> Reserved
;// <o1.6..7> GPF3 <0=> Input <1=> Output <2=> EINT[3] <3=> Reserved
;// <o1.4..5> GPF2 <0=> Input <1=> Output <2=> EINT[2] <3=> Reserved
;// <o1.2..3> GPF1 <0=> Input <1=> Output <2=> EINT[1] <3=> Reserved
;// <o1.0..1> GPF0 <0=> Input <1=> Output <2=> EINT[0] <3=> Reserved
;// </h>
;// <h> Port F Pull-up Settings Register (GPFUP)
;// <o2.7> GPF7 Pull-up Disable
;// <o2.6> GPF6 Pull-up Disable
;// <o2.5> GPF5 Pull-up Disable
;// <o2.4> GPF4 Pull-up Disable
;// <o2.3> GPF3 Pull-up Disable
;// <o2.2> GPF2 Pull-up Disable
;// <o2.1> GPF1 Pull-up Disable
;// <o2.0> GPF0 Pull-up Disable
;// </h>
;// </e>
GPF_SETUP EQU 1
GPFCON_Val EQU 0x000000AA
GPFUP_Val EQU 0x0000000F
;// <e> Port G Settings
;// <h> Port G Control Register (GPGCON)
;// <o1.30..31> GPG15 <0=> Input <1=> Output <2=> EINT[23] <3=> Reserved
;// <o1.28..29> GPG14 <0=> Input <1=> Output <2=> EINT[22] <3=> Reserved
;// <o1.26..27> GPG13 <0=> Input <1=> Output <2=> EINT[21] <3=> Reserved
;// <o1.24..25> GPG12 <0=> Input <1=> Output <2=> EINT[20] <3=> Reserved
;// <o1.22..23> GPG11 <0=> Input <1=> Output <2=> EINT[19] <3=> TCLK[1]
;// <o1.20..21> GPG10 <0=> Input <1=> Output <2=> EINT[18] <3=> nCTS1
;// <o1.18..19> GPG9 <0=> Input <1=> Output <2=> EINT[17] <3=> nRTS1
;// <o1.16..17> GPG8 <0=> Input <1=> Output <2=> EINT[16] <3=> Reserved
;// <o1.14..15> GPG7 <0=> Input <1=> Output <2=> EINT[15] <3=> SPICLK1
;// <o1.12..13> GPG6 <0=> Input <1=> Output <2=> EINT[14] <3=> SPIMOSI1
;// <o1.10..11> GPG5 <0=> Input <1=> Output <2=> EINT[13] <3=> SPIMISO1
;// <o1.8..9> GPG4 <0=> Input <1=> Output <2=> EINT[12] <3=> LCD_PWRDN
;// <o1.6..7> GPG3 <0=> Input <1=> Output <2=> EINT[11] <3=> nSS1
;// <o1.4..5> GPG2 <0=> Input <1=> Output <2=> EINT[10] <3=> nSS0
;// <o1.2..3> GPG1 <0=> Input <1=> Output <2=> EINT[9] <3=> Reserved
;// <o1.0..1> GPG0 <0=> Input <1=> Output <2=> EINT[8] <3=> Reserved
;// </h>
;// <h> Port G Pull-up Settings Register (GPGUP)
;// <o2.15> GPG15 Pull-up Disable
;// <o2.14> GPG14 Pull-up Disable
;// <o2.13> GPG13 Pull-up Disable
;// <o2.12> GPG12 Pull-up Disable
;// <o2.11> GPG11 Pull-up Disable
;// <o2.10> GPG10 Pull-up Disable
;// <o2.9> GPG9 Pull-up Disable
;// <o2.8> GPG8 Pull-up Disable
;// <o2.7> GPG7 Pull-up Disable
;// <o2.6> GPG6 Pull-up Disable
;// <o2.5> GPG5 Pull-up Disable
;// <o2.4> GPG4 Pull-up Disable
;// <o2.3> GPG3 Pull-up Disable
;// <o2.2> GPG2 Pull-up Disable
;// <o2.1> GPG1 Pull-up Disable
;// <o2.0> GPG0 Pull-up Disable
;// </h>
;// </e>
GPG_SETUP       EQU     0               ; 0 = leave port G registers at reset defaults
GPGCON_Val      EQU     0x00000000
GPGUP_Val       EQU     0x00000000
;// <e> Port H Settings
;// <h> Port H Control Register (GPHCON)
;// <o1.20..21> GPH10 <0=> Input <1=> Output <2=> CLKOUT1 <3=> Reserved
;// <o1.18..19> GPH9 <0=> Input <1=> Output <2=> CLKOUT0 <3=> Reserved
;// <o1.16..17> GPH8 <0=> Input <1=> Output <2=> UEXTCLK <3=> Reserved
;// <o1.14..15> GPH7 <0=> Input <1=> Output <2=> RXD[2] <3=> nCTS1
;// <o1.12..13> GPH6 <0=> Input <1=> Output <2=> TXD[2] <3=> nRTS1
;// <o1.10..11> GPH5 <0=> Input <1=> Output <2=> RXD[1] <3=> Reserved
;// <o1.8..9> GPH4 <0=> Input <1=> Output <2=> TXD[1] <3=> Reserved
;// <o1.6..7> GPH3 <0=> Input <1=> Output <2=> RXD[0] <3=> Reserved
;// <o1.4..5> GPH2 <0=> Input <1=> Output <2=> TXD[0] <3=> Reserved
;// <o1.2..3> GPH1 <0=> Input <1=> Output <2=> nRTS0 <3=> Reserved
;// <o1.0..1> GPH0 <0=> Input <1=> Output <2=> nCTS0 <3=> Reserved
;// </h>
;// <h> Port H Pull-up Settings Register (GPHUP)
;// <o2.10> GPH10 Pull-up Disable
;// <o2.9> GPH9 Pull-up Disable
;// <o2.8> GPH8 Pull-up Disable
;// <o2.7> GPH7 Pull-up Disable
;// <o2.6> GPH6 Pull-up Disable
;// <o2.5> GPH5 Pull-up Disable
;// <o2.4> GPH4 Pull-up Disable
;// <o2.3> GPH3 Pull-up Disable
;// <o2.2> GPH2 Pull-up Disable
;// <o2.1> GPH1 Pull-up Disable
;// <o2.0> GPH0 Pull-up Disable
;// </h>
;// </e>
GPH_SETUP       EQU     0               ; 0 = leave port H registers at reset defaults
GPHCON_Val      EQU     0x00000000
GPHUP_Val       EQU     0x00000000
;// <e> Port J Settings
;// <h> Port J Control Register (GPJCON)
;// <o1.24..25> GPJ12 <0=> Input <1=> Output <2=> CAMRESET <3=> Reserved
;// <o1.22..23> GPJ11 <0=> Input <1=> Output <2=> CAMCLKOUT <3=> Reserved
;// <o1.20..21> GPJ10 <0=> Input <1=> Output <2=> CAMHREF <3=> Reserved
;// <o1.18..19> GPJ9 <0=> Input <1=> Output <2=> CAMVSYNC <3=> Reserved
;// <o1.16..17> GPJ8 <0=> Input <1=> Output <2=> CAMPCLK <3=> Reserved
;// <o1.14..15> GPJ7 <0=> Input <1=> Output <2=> CAMDATA[7] <3=> Reserved
;// <o1.12..13> GPJ6 <0=> Input <1=> Output <2=> CAMDATA[6] <3=> Reserved
;// <o1.10..11> GPJ5 <0=> Input <1=> Output <2=> CAMDATA[5] <3=> Reserved
;// <o1.8..9> GPJ4 <0=> Input <1=> Output <2=> CAMDATA[4] <3=> Reserved
;// <o1.6..7> GPJ3 <0=> Input <1=> Output <2=> CAMDATA[3] <3=> Reserved
;// <o1.4..5> GPJ2 <0=> Input <1=> Output <2=> CAMDATA[2] <3=> Reserved
;// <o1.2..3> GPJ1 <0=> Input <1=> Output <2=> CAMDATA[1] <3=> Reserved
;// <o1.0..1> GPJ0 <0=> Input <1=> Output <2=> CAMDATA[0] <3=> Reserved
;// </h>
;// <h> Port J Pull-up Settings Register (GPJUP)
;// <o2.12> GPJ12 Pull-up Disable
;// <o2.11> GPJ11 Pull-up Disable
;// <o2.10> GPJ10 Pull-up Disable
;// <o2.9> GPJ9 Pull-up Disable
;// <o2.8> GPJ8 Pull-up Disable
;// <o2.7> GPJ7 Pull-up Disable
;// <o2.6> GPJ6 Pull-up Disable
;// <o2.5> GPJ5 Pull-up Disable
;// <o2.4> GPJ4 Pull-up Disable
;// <o2.3> GPJ3 Pull-up Disable
;// <o2.2> GPJ2 Pull-up Disable
;// <o2.1> GPJ1 Pull-up Disable
;// <o2.0> GPJ0 Pull-up Disable
;// </h>
;// </e>
GPJ_SETUP       EQU     0               ; 0 = leave port J registers at reset defaults
GPJCON_Val      EQU     0x00000000
GPJUP_Val       EQU     0x00000000
;// </e> I/O Setup
;----------------------- CODE --------------------------------------------------
                PRESERVE8

; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
                AREA    RESET, CODE, READONLY
                ARM

; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; Each entry is a PC-relative load, so the eight vectors plus the eight
; address words below can be copied as one 16-word unit (see RAM_INTVEC
; in Reset_Handler).
                EXPORT  Entry_Point
Entry_Point
Vectors         LDR     PC, Reset_Addr          ; 0x00: reset
                LDR     PC, Undef_Addr          ; 0x04: undefined instruction
                LDR     PC, SWI_Addr            ; 0x08: software interrupt
                LDR     PC, PAbt_Addr           ; 0x0C: prefetch abort
                LDR     PC, DAbt_Addr           ; 0x10: data abort
                NOP                             ; 0x14: reserved vector
                LDR     PC, IRQ_Addr            ; 0x18: IRQ
                LDR     PC, FIQ_Addr            ; 0x1C: FIQ

; Literal pool of handler addresses loaded by the vector entries above.
Reset_Addr      DCD     Reset_Handler
Undef_Addr      DCD     Undef_Handler
SWI_Addr        DCD     SWI_Handler
PAbt_Addr       DCD     PAbt_Handler
DAbt_Addr       DCD     DAbt_Handler
                DCD     0                       ; Reserved Address
IRQ_Addr        DCD     IRQ_Handler
FIQ_Addr        DCD     FIQ_Handler

; Placeholder handlers: tight spin loops so an unexpected exception
; halts visibly under a debugger.
Undef_Handler   B       Undef_Handler
SWI_Handler     B       SWI_Handler
PAbt_Handler    B       PAbt_Handler
;DAbt_Handler   B       DAbt_Handler            ; superseded by the C-trap version below
FIQ_Handler     B       FIQ_Handler
;*
;*************************************************************************
;*
;* Interrupt handling
;*
;*************************************************************************
;*

; Data abort handler: dumps the aborting context into an 18-word frame
; (r0-r12, sp, lr, pc, cpsr, old_r0) on the abort-mode stack and passes
; its address to the C trap handler.
DAbt_Handler
                IMPORT  rt_hw_trap_dabt
                sub     sp, sp, #72             ;/* 18 words for the register frame */
                stmia   sp, {r0 - r12}          ;/* Calling r0-r12 */
                add     r8, sp, #60             ; r8 -> pc slot of the frame
                stmdb   r8, {sp, lr}            ;/* Calling SP, LR */
                str     lr, [r8, #0]            ;/* Save calling PC */
                                                ; NOTE(review): on a data abort
                                                ; lr_abt = faulting instruction + 8;
                                                ; confirm rt_hw_trap_dabt compensates
                                                ; before reporting this as the PC
                mrs     r6, spsr
                str     r6, [r8, #4]            ;/* Save CPSR */
                str     r0, [r8, #8]            ;/* Save OLD_R0 */
                mov     r0, sp                  ; arg0 = pointer to the frame
                bl      rt_hw_trap_dabt         ; not expected to return (a return
                                                ; would fall through into Reset_Handler)
;##########################################
; Reset Handler
; First code executed after reset.  Order matters: watchdog first (so it
; cannot expire during init), then clocks, memory controller and GPIO
; from the wizard values above, optional vector copy to internal RAM,
; one stack per ARM mode, and finally the C library entry point.
                EXPORT  Reset_Handler
Reset_Handler

; Watchdog Setup ---------------------------------------------------------------
                IF      WT_SETUP != 0
                LDR     R0, =WT_BASE
                LDR     R1, =WTCON_Val
                LDR     R2, =WTDAT_Val
                STR     R2, [R0, #WTCNT_OFS]    ; preload the counter
                STR     R2, [R0, #WTDAT_OFS]    ; reload value
                STR     R1, [R0, #WTCON_OFS]    ; control word written last
                ENDIF

; Clock Setup ------------------------------------------------------------------
                IF      (:LNOT:(:DEF:NO_CLOCK_SETUP)):LAND:(CLOCK_SETUP != 0)
                LDR     R0, =CLOCK_BASE
                LDR     R1, =LOCKTIME_Val
                STR     R1, [R0, #LOCKTIME_OFS] ; PLL lock time before PLL writes
                MOV     R1, #CLKDIVN_Val
                STR     R1, [R0, #CLKDIVN_OFS]  ; bus dividers before the PLLs
                LDR     R1, =CAMDIVN_Val
                STR     R1, [R0, #CAMDIVN_OFS]
                LDR     R1, =MPLLCON_Val
                STR     R1, [R0, #MPLLCON_OFS]  ; main PLL
                LDR     R1, =UPLLCON_Val
                STR     R1, [R0, #UPLLCON_OFS]  ; USB PLL
                MOV     R1, #CLKSLOW_Val
                STR     R1, [R0, #CLKSLOW_OFS]
                LDR     R1, =CLKCON_Val
                STR     R1, [R0, #CLKCON_OFS]   ; peripheral clock gates
                ENDIF

; Memory Controller Setup ------------------------------------------------------
; NOTE(review): this section is gated on CLOCK_SETUP rather than a
; dedicated MC_SETUP symbol - confirm that is intentional.
                IF      (:LNOT:(:DEF:NO_MC_SETUP)):LAND:(CLOCK_SETUP != 0)
                LDR     R0, =MC_BASE
                LDR     R1, =BWSCON_Val
                STR     R1, [R0, #BWSCON_OFS]   ; bus width & wait states
                LDR     R1, =BANKCON0_Val
                STR     R1, [R0, #BANKCON0_OFS]
                LDR     R1, =BANKCON1_Val
                STR     R1, [R0, #BANKCON1_OFS]
                LDR     R1, =BANKCON2_Val
                STR     R1, [R0, #BANKCON2_OFS]
                LDR     R1, =BANKCON3_Val
                STR     R1, [R0, #BANKCON3_OFS]
                LDR     R1, =BANKCON4_Val
                STR     R1, [R0, #BANKCON4_OFS]
                LDR     R1, =BANKCON5_Val
                STR     R1, [R0, #BANKCON5_OFS]
                LDR     R1, =BANKCON6_Val
                STR     R1, [R0, #BANKCON6_OFS]
                LDR     R1, =BANKCON7_Val
                STR     R1, [R0, #BANKCON7_OFS]
                LDR     R1, =REFRESH_Val
                STR     R1, [R0, #REFRESH_OFS]  ; SDRAM refresh
                MOV     R1, #BANKSIZE_Val
                STR     R1, [R0, #BANKSIZE_OFS]
                MOV     R1, #MRSRB6_Val
                STR     R1, [R0, #MRSRB6_OFS]   ; SDRAM mode register, bank 6
                MOV     R1, #MRSRB7_Val
                STR     R1, [R0, #MRSRB7_OFS]   ; SDRAM mode register, bank 7
                ENDIF

; I/O Pins Setup ---------------------------------------------------------------
; One CON/UP register pair per enabled port (port A has no pull-ups).
                IF      (:LNOT:(:DEF:NO_GP_SETUP)):LAND:(GP_SETUP != 0)
                IF      GPA_SETUP != 0
                LDR     R0, =GPA_BASE
                LDR     R1, =GPACON_Val
                STR     R1, [R0, #GPCON_OFS]
                ENDIF

                IF      GPB_SETUP != 0
                LDR     R0, =GPB_BASE
                LDR     R1, =GPBCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPBUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPC_SETUP != 0
                LDR     R0, =GPC_BASE
                LDR     R1, =GPCCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPCUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPD_SETUP != 0
                LDR     R0, =GPD_BASE
                LDR     R1, =GPDCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPDUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPE_SETUP != 0
                LDR     R0, =GPE_BASE
                LDR     R1, =GPECON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPEUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPF_SETUP != 0
                LDR     R0, =GPF_BASE
                LDR     R1, =GPFCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPFUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPG_SETUP != 0
                LDR     R0, =GPG_BASE
                LDR     R1, =GPGCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPGUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPH_SETUP != 0
                LDR     R0, =GPH_BASE
                LDR     R1, =GPHCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPHUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF

                IF      GPJ_SETUP != 0
                LDR     R0, =GPJ_BASE
                LDR     R1, =GPJCON_Val
                STR     R1, [R0, #GPCON_OFS]
                LDR     R1, =GPJUP_Val
                STR     R1, [R0, #GPUP_OFS]
                ENDIF
                ENDIF

; Copy Exception Vectors to Internal RAM ---------------------------------------
; Copies the eight LDR-PC vectors plus their eight address words (16
; words total, see the Vectors block above).
                IF      :DEF:RAM_INTVEC
                ADR     R8, Vectors             ; Source
                LDR     R9, =IRAM_BASE          ; Destination
                LDMIA   R8!, {R0-R7}            ; Load Vectors
                STMIA   R9!, {R0-R7}            ; Store Vectors
                LDMIA   R8!, {R0-R7}            ; Load Handler Addresses
                STMIA   R9!, {R0-R7}            ; Store Handler Addresses
                ENDIF

; Setup Stack for each mode ----------------------------------------------------
; R0 walks downwards from Stack_Top, carving one region per mode while
; IRQ/FIQ stay masked.
                LDR     R0, =Stack_Top

; Enter Undefined Instruction Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #UND_Stack_Size

; Enter Abort Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #ABT_Stack_Size

; Enter FIQ Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #FIQ_Stack_Size

; Enter IRQ Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #IRQ_Stack_Size

; Enter Supervisor Mode and set its Stack Pointer
                MSR     CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
                MOV     SP, R0
                SUB     R0, R0, #SVC_Stack_Size

; Enter User Mode and set its Stack Pointer
; (intentionally left in SVC mode: the MSR below stays commented out)
;               MSR     CPSR_c, #Mode_USR
                MOV     SP, R0
                SUB     SL, SP, #USR_Stack_Size ; SL (r10) = user stack limit

; Enter the C code -------------------------------------------------------------
                IMPORT  __main
                LDR     R0, =__main
                BX      R0
                IMPORT  rt_interrupt_enter
                IMPORT  rt_interrupt_leave
                IMPORT  rt_thread_switch_interrupt_flag
                IMPORT  rt_interrupt_from_thread
                IMPORT  rt_interrupt_to_thread
                IMPORT  rt_hw_trap_irq

; IRQ entry: save the interrupted context on the IRQ stack, run the C
; interrupt framework, then either resume the interrupted thread or
; divert into the interrupt-time context switch (frame still stacked).
IRQ_Handler     PROC
                EXPORT  IRQ_Handler
                STMFD   sp!, {r0-r12,lr}        ; save interrupted context
                BL      rt_interrupt_enter      ; ++interrupt nest counter
                BL      rt_hw_trap_irq          ; dispatch to the registered ISR
                BL      rt_interrupt_leave      ; --interrupt nest counter
                ; if rt_thread_switch_interrupt_flag set, jump to
                ; rt_hw_context_switch_interrupt_do and don't return
                LDR     r0, =rt_thread_switch_interrupt_flag
                LDR     r1, [r0]
                CMP     r1, #1
                BEQ     rt_hw_context_switch_interrupt_do
                LDMFD   sp!, {r0-r12,lr}
                SUBS    pc, lr, #4              ; return (lr_irq = interrupted pc + 4)
                ENDP
; /*
;  * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
;  *
;  * Entered only from IRQ_Handler (never called from C) with r0 holding
;  * the address of rt_thread_switch_interrupt_flag and the interrupted
;  * thread's r0-r12,lr still on the IRQ stack.  Rebuilds a thread frame
;  * (spsr, cpsr, r0-r12, lr, pc - the layout rt_hw_context_switch uses)
;  * on the preempted thread's SVC stack, then restores the "to" thread.
;  */
rt_hw_context_switch_interrupt_do   PROC
                EXPORT  rt_hw_context_switch_interrupt_do
                MOV     r1, #0                  ; clear flag
                STR     r1, [r0]
                LDMFD   sp!, {r0-r12,lr}        ; reload saved registers
                STMFD   sp!, {r0-r3}            ; save r0-r3
                MOV     r1, sp                  ; r1 -> the saved r0-r3 on the IRQ stack
                ADD     sp, sp, #16             ; restore sp (IRQ stack now balanced)
                SUB     r2, lr, #4              ; save old task's pc to r2 (lr_irq = pc + 4)
                MRS     r3, spsr                ; get cpsr of interrupt thread
                ; switch to SVC mode and no interrupt
                MSR     cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
                STMFD   sp!, {r2}               ; push old task's pc
                STMFD   sp!, {r4-r12,lr}        ; push old task's lr,r12-r4
                MOV     r4, r1                  ; Special optimised code below
                MOV     r5, r3
                LDMFD   r4!, {r0-r3}            ; fetch r0-r3 back from the IRQ stack
                STMFD   sp!, {r0-r3}            ; push old task's r3-r0
                STMFD   sp!, {r5}               ; push old task's cpsr
                MRS     r4, spsr                ; NOTE(review): spsr_svc here is not the
                                                ; interrupted thread's - this slot only
                                                ; keeps the frame layout consistent
                STMFD   sp!, {r4}               ; push old task's spsr
                LDR     r4, =rt_interrupt_from_thread
                LDR     r5, [r4]
                STR     sp, [r5]                ; store sp in preempted tasks's TCB
                LDR     r6, =rt_interrupt_to_thread
                LDR     r6, [r6]
                LDR     sp, [r6]                ; get new task's stack pointer
                LDMFD   sp!, {r4}               ; pop new task's spsr
                MSR     spsr_cxsf, r4
                LDMFD   sp!, {r4}               ; pop new task's psr
                MSR     cpsr_cxsf, r4           ; cpsr restored here, so no ^ needed below
                LDMFD   sp!, {r0-r12,lr,pc}     ; pop new task's r0-r12,lr & pc
                ENDP
; C-library heap/stack initialisation.
                IF      :DEF:__MICROLIB
                EXPORT  __heap_base
                EXPORT  __heap_limit
                ELSE
; User Initial Stack & Heap (standard C library two-region memory model:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit)
                AREA    |.text|, CODE, READONLY

                IMPORT  __use_two_region_memory
                EXPORT  __user_initial_stackheap
__user_initial_stackheap
                LDR     R0, =Heap_Mem                           ; heap base
                LDR     R1, =(Stack_Mem + USR_Stack_Size)       ; stack top
                LDR     R2, =(Heap_Mem + Heap_Size)             ; heap limit
                LDR     R3, =Stack_Mem                          ; stack limit
                BX      LR
                ENDIF

                END
|
vandercookking/h7_device_RTT
| 4,361
|
rt-thread/libcpu/arm/realview-a8-vmm/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include <rtconfig.h>
#ifdef RT_USING_VMM
#include <vmm.h>
#endif
.section .text, "ax"

/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the pre-mask CPSR in r0; the caller passes it back to
 * rt_hw_interrupt_enable() to restore the previous interrupt state.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs     r0, cpsr
    cpsid   i                   @ mask IRQ only (FIQ left as-is)
    bx      lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores CPSR from a value previously returned by
 * rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr     cpsr, r0            @ NOTE(review): bare "cpsr" destination; the
                                @ cortex-r4 port writes cpsr_c - confirm the
                                @ intended field mask
    bx      lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the to-thread's saved-sp slot)
 * First switch into a thread: nothing to save, only restore.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr     sp, [r0]            @ get new task stack pointer
    ldmfd   sp!, {r4}           @ pop new task spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc (spsr -> cpsr)
.section .bss.share.isr
/* re-entrancy reference count guarding the idle-cleanup / vIRQ-update
 * path in rt_hw_context_switch; placed in the shared BSS section */
_guest_switch_lvl:
    .word 0

.globl vmm_virq_update
.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of the from-thread's saved-sp slot)
 * r1 --> to   (address of the to-thread's saved-sp slot)
 * Saves the current thread's context frame (cpsr, r0-r12, lr, pc) and
 * restores the target thread.  In VMM builds, switching into the guest
 * thread additionally runs idle cleanup and updates the guest's vIRQ
 * state, protected by the _guest_switch_lvl reference count.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}           @ push pc (lr should be pushed in place of PC)
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
    mrs     r4, cpsr
    tst     lr, #0x01
    orrne   r4, r4, #0x20       @ it's thumb code
    stmfd   sp!, {r4}           @ push cpsr
    str     sp, [r0]            @ store sp in preempted tasks TCB
    ldr     sp, [r1]            @ get new task stack pointer

#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
    @ need to make sure we are in vmm domain as we would use rt_current_thread
    ldr     r2, =vmm_domain_val
    ldr     r7, [r2]
    mcr     p15, 0, r7, c3, c0
#endif

    /* check whether vmm thread, otherwise, update vIRQ */
    ldr     r3, =rt_current_thread
    ldr     r4, [r3]
    ldr     r5, =vmm_thread
    cmp     r4, r5
    beq     switch_to_guest

    @ not falling into guest. Simple task ;-)
    ldmfd   sp!, {r6}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r6
    ldmfd   sp!, {r0-r12, lr, pc}^

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ the stack is saved in the guest domain so we need to
    @ come back to the guest domain to get the registers.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
    /* The user can do nearly anything in rt_thread_idle_excute because it will
    call the thread->cleanup. One common thing is sending events and wake up
    threads. So the guest thread will be preempted. This is the only point that
    the guest thread would call rt_hw_context_switch and "yield".

    More over, rt_schedule will call this function and this function *will*
    reentrant. If that happens, we need to make sure that call the
    rt_thread_idle_excute and vmm_virq_update again and we are in super domain.
    I use a "reference count" to achieve such behaviour. If you have better
    idea, tell me. */
    ldr     r4, =_guest_switch_lvl
    ldr     r5, [r4]
    add     r5, r5, #1
    str     r5, [r4]
    cmp     r5, #1
    bne     _switch_through

    bl      rt_thread_idle_excute
    bl      vmm_virq_update

    /* we need _guest_switch_lvl to protect until _switch_through, but it's OK
     * to cleanup the reference count here because the code below will not be
     * reentrant. */
    sub     r5, r5, #1
    str     r5, [r4]

#ifdef RT_VMM_USING_DOMAIN
    ldr     r1, =guest_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
_switch_through:
#endif /* RT_USING_VMM */
    ldmfd   sp!, {r4}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Called from interrupt context: records a pending switch so the IRQ
 * exit path performs it.  Only the first request in a nest records
 * "from"; later requests just update "to".
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr     r2, =rt_thread_switch_interrupt_flag
    ldr     r3, [r2]
    cmp     r3, #1
    beq     _reswitch                   @ a switch is already pending: keep "from"
    ldr     ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov     r3, #1                      @ set rt_thread_switch_interrupt_flag to 1
    str     r0, [ip]
    str     r3, [r2]
_reswitch:
    ldr     r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
    str     r1, [r2]
    bx      lr
|
vandercookking/h7_device_RTT
| 9,785
|
rt-thread/libcpu/arm/realview-a8-vmm/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include <rtconfig.h>
#ifdef RT_USING_VMM
#include <vmm.h>
.equ orig_irq_isr, LINUX_VECTOR_POS+0x18
#else
#undef RT_VMM_USING_DOMAIN
#endif
/* ARM processor mode encodings (CPSR[4:0]) */
.equ Mode_USR,        0x10
.equ Mode_FIQ,        0x11
.equ Mode_IRQ,        0x12
.equ Mode_SVC,        0x13
.equ Mode_ABT,        0x17
.equ Mode_UND,        0x1B
.equ Mode_SYS,        0x1F

.equ I_Bit,           0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit,           0x40 @ when F bit is set, FIQ is disabled

#ifndef RT_USING_VMM
/* per-mode stack sizes carved out of the ISR stack by stack_setup */
.equ UND_Stack_Size,  0x00000000
.equ SVC_Stack_Size,  0x00000100
.equ ABT_Stack_Size,  0x00000000
.equ RT_FIQ_STACK_PGSZ, 0x00000000
.equ RT_IRQ_STACK_PGSZ, 0x00000100
.equ USR_Stack_Size,  0x00000100

#define ISR_Stack_Size  (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
                         RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#else
/* under the VMM only the FIQ/IRQ stacks belong to this image */
#define ISR_Stack_Size  (RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#endif
.section .data.share.isr
/* stack: one statically reserved block; stack_setup below hands slices
 * of it (downwards from stack_top) to each exception mode */
.globl stack_start
.globl stack_top
.align 3
stack_start:
.rept ISR_Stack_Size
.byte 0
.endr
stack_top:
.text
/* reset entry: force SVC mode with IRQs masked, set up the mode stacks,
 * zero .bss, run C++ global constructors, then enter the kernel (or the
 * VMM entry in RT_USING_VMM builds). */
.globl _reset
_reset:
#ifdef RT_USING_VMM
    /* save all the parameter and variable registers */
    stmfd   sp!, {r0-r12, lr}
#endif
    /* set the cpu to SVC32 mode and disable interrupt */
    mrs     r0, cpsr
    bic     r0, r0, #0x1f
    orr     r0, r0, #0x13       @ Mode_SVC
    msr     cpsr_c, r0

    /* setup stack */
    bl      stack_setup

    /* clear .bss */
    mov     r0,#0               /* get a zero */
    ldr     r1,=__bss_start     /* bss start */
    ldr     r2,=__bss_end       /* bss end */

bss_loop:
    cmp     r1,r2               /* check if data to clear */
    strlo   r0,[r1],#4          /* clear 4 bytes */
    blo     bss_loop            /* loop until done */

#ifdef RT_USING_VMM
    /* clear .bss.share */
    mov     r0,#0               /* get a zero */
    ldr     r1,=__bss_share_start /* bss start */
    ldr     r2,=__bss_share_end /* bss end */

bss_share_loop:
    cmp     r1,r2               /* check if data to clear */
    strlo   r0,[r1],#4          /* clear 4 bytes */
    blo     bss_share_loop      /* loop until done */
#endif

    /* call C++ constructors of global objects */
    ldr     r0, =__ctors_start__
    ldr     r1, =__ctors_end__

ctor_loop:
    cmp     r0, r1
    beq     ctor_end
    ldr     r2, [r0], #4        /* next constructor pointer */
    stmfd   sp!, {r0-r1}
    mov     lr, pc              /* manual BLX (keeps ARMv4 compatibility) */
    bx      r2
    ldmfd   sp!, {r0-r1}
    b       ctor_loop
ctor_end:

    /* start RT-Thread Kernel */
#ifdef RT_USING_VMM
    /* restore the parameter */
    ldmfd   sp!, {r0-r3}
    bl      vmm_entry
    ldmfd   sp!, {r4-r12, pc}
#else
    ldr     pc, _rtthread_startup
_rtthread_startup:
    .word rtthread_startup
#endif
/* carve per-mode stacks downwards from stack_top; entered and left in
 * SVC mode with IRQ/FIQ masked */
stack_setup:
    ldr     r0, =stack_top
#ifdef RT_USING_VMM
    @ Linux use stmia to save r0, lr and spsr. To align to 8 byte boundary,
    @ just allocate 16 bytes for it.
    sub     r0, r0, #16
#endif

#ifndef RT_USING_VMM
    @ Set the startup stack for svc
    mov     sp, r0
#endif
#ifndef RT_USING_VMM
    @ Enter Undefined Instruction Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #UND_Stack_Size

    @ Enter Abort Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #ABT_Stack_Size
#endif

    @ Enter FIQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_FIQ_STACK_PGSZ

    @ Enter IRQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_IRQ_STACK_PGSZ

    /* come back to SVC mode */
    msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx      lr
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.section .text.isr, "ax"
/* FIQ entry: r8-r12 are banked in FIQ mode, so only r0-r7 and lr need
 * saving around the C handler. */
.align 5
.globl vector_fiq
vector_fiq:
    stmfd   sp!,{r0-r7,lr}
    bl      rt_hw_trap_fiq
    ldmfd   sp!,{r0-r7,lr}
    subs    pc, lr, #4          /* return (lr_fiq = interrupted pc + 4) */
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check

/* IRQ entry.  Non-VMM builds: run the interrupt framework, then either
 * resume the interrupted thread or divert into the interrupt context
 * switch.  VMM builds: additionally decide whether the IRQ has to be
 * routed to the guest OS's original vector. */
.align 5
.globl vector_irq
vector_irq:
    stmfd   sp!, {r0-r12,lr}

#ifdef RT_VMM_USING_DOMAIN
    @ save the last domain
    mrc     p15, 0, r5, c3, c0
    @ switch to vmm domain as we are going to call vmm codes
    ldr     r1, =vmm_domain_val
    ldr     r4, [r1]
    mcr     p15, 0, r4, c3, c0
#endif

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

#ifdef RT_VMM_USING_DOMAIN
    @ restore the last domain. It do some redundant work but simplify the
    @ logic. It might be the guest domain so rt_thread_switch_interrupt_flag
    @ should lay in .bss.share
    mcr     p15, 0, r5, c3, c0
#endif

    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr     r0, =rt_thread_switch_interrupt_flag
    ldr     r1, [r0]
    cmp     r1, #1
    beq     rt_hw_context_switch_interrupt_do

#ifndef RT_USING_VMM
    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4
#else
#ifdef RT_VMM_USING_DOMAIN
    @ r4 is vmm_domain_val
    @ back to vmm domain as we need access rt_current_thread
    mcr     p15, 0, r4, c3, c0
#endif
    /* check whether we need to do IRQ routing
     * ensure the int is disabled. Or there will be an infinite loop. */
    ldr     r0, =rt_current_thread
    ldr     r0, [r0]
    ldr     r1, =vmm_thread
    cmp     r0, r1
    beq     switch_to_guest
#ifdef RT_VMM_USING_DOMAIN
    @ r5 is domain of interrupted context
    @ it might be super_domain_val or vmm_domain_val so we need to restore it.
    mcr     p15, 0, r5, c3, c0
#endif
    @ switch back if the interrupted thread is not vmm
    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ We are going to execute rt-thread code but accessing the content of the
    @ guest. So switch to super domain.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
    /* check whether there is a pending interrupt for Guest OS */
    bl      vmm_virq_check

#ifdef RT_VMM_USING_DOMAIN
    @ All done, restore the guest domain.
    mcr     p15, 0, r5, c3, c0
#endif

    cmp     r0, #0x0
    beq     route_irq_to_guest

    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

route_irq_to_guest:
    ldmfd   sp!, {r0-r12,lr}
    b       orig_irq_isr        @ hand the IRQ to the guest's original vector
#endif /* RT_USING_VMM */
/* Interrupt-time context switch: entered from vector_irq with the
 * interrupted thread's r0-r12,lr still on the IRQ stack and r0 holding
 * the address of rt_thread_switch_interrupt_flag.  Rebuilds a thread
 * frame (cpsr, r0-r12, lr, pc) on the preempted thread's SVC stack,
 * then restores the "to" thread. */
rt_hw_context_switch_interrupt_do:
    mov     r1,  #0             @ clear flag
    str     r1,  [r0]

    mov     r1, sp              @ r1 point to {r0-r3} in stack
    add     sp, sp, #4*4
    ldmfd   sp!, {r4-r12,lr}    @ reload saved registers
    mrs     r0,  spsr           @ get cpsr of interrupt thread
    sub     r2,  lr, #4         @ save old task's pc to r2

    @ Switch to SVC mode with no interrupt. If the usr mode guest is
    @ interrupted, this will just switch to the stack of kernel space.
    @ save the registers in kernel space won't trigger data abort.
    msr     cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd   sp!, {r2}           @ push old task's pc
    stmfd   sp!, {r4-r12,lr}    @ push old task's lr,r12-r4
    ldmfd   r1,  {r1-r4}        @ restore r0-r3 of the interrupt thread
    stmfd   sp!, {r1-r4}        @ push old task's r0-r3
    stmfd   sp!, {r0}           @ push old task's cpsr

    ldr     r4,  =rt_interrupt_from_thread
    ldr     r5,  [r4]
    str     sp,  [r5]           @ store sp in preempted tasks's TCB

#ifdef RT_VMM_USING_DOMAIN
    @ If a thread is wake up by interrupt, it should be RTT thread.
    @ Make sure the domain is correct.
    ldr     r1, =vmm_domain_val
    ldr     r2, [r1]
    mcr     p15, 0, r2, c3, c0
#endif
    ldr     r6,  =rt_interrupt_to_thread
    ldr     r6,  [r6]
    ldr     sp,  [r6]           @ get new task's stack pointer

    ldmfd   sp!, {r4}           @ pop new task's cpsr to spsr
    msr     spsr_cxsf, r4
    ldmfd   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
/* Build a struct rt_hw_exp_stack (17 words: r0-r12, sp, lr, pc, cpsr)
 * on the current exception stack, then drop to SVC mode to capture the
 * caller's sp/lr.  Leaves r0 pointing at the frame for the C handler;
 * clobbers r6. */
.macro push_svc_reg
    sub     sp, sp, #17 * 4     @/* Sizeof(struct rt_hw_exp_stack)  */
    stmia   sp, {r0 - r12}      @/* Calling r0-r12                  */
    mov     r0, sp              @/* r0 = frame pointer for the trap */
    mrs     r6, spsr            @/* Save CPSR                       */
    str     lr, [r0, #15*4]     @/* Push PC                         */
    str     r6, [r0, #16*4]     @/* Push CPSR                       */
    cps     #Mode_SVC
    str     sp, [r0, #13*4]     @/* Save calling SP                 */
    str     lr, [r0, #14*4]     @/* Save calling PC                 */
.endm
/* Remaining exception vectors: each dumps the caller's context via
 * push_svc_reg, calls its C trap handler with r0 = frame pointer, and
 * parks in an infinite loop (the trap handlers do not return). */
.align 5
.globl vector_swi
vector_swi:
    push_svc_reg
    bl      rt_hw_trap_swi
    b       .

.align 5
.globl vector_undef
vector_undef:
    push_svc_reg
    bl      rt_hw_trap_undef
    b       .

.align 5
.globl vector_pabt
vector_pabt:
    push_svc_reg
    bl      rt_hw_trap_pabt
    b       .

.align 5
.globl vector_dabt
vector_dabt:
    push_svc_reg
    bl      rt_hw_trap_dabt
    b       .

.align 5
.globl vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .
|
vandercookking/h7_device_RTT
| 2,833
|
rt-thread/libcpu/arm/realview-a8-vmm/cp15_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
/* unsigned rt_cpu_get_smp_id(void) - r0 = MPIDR (CP15 c0,c0,5) */
.globl rt_cpu_get_smp_id
rt_cpu_get_smp_id:
    mrc     p15, #0, r0, c0, c0, #5
    bx      lr
/* void rt_cpu_vector_set_base(addr) - program VBAR (CP15 c12,c0,0) */
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
    mcr     p15, #0, r0, c12, c0, #0
    dsb                         @ ensure the write completes before returning
    bx      lr
/* enable the data cache: set SCTLR.C (bit 2) */
.globl rt_hw_cpu_dcache_enable
rt_hw_cpu_dcache_enable:
    mrc     p15, #0, r0, c1, c0, #0
    orr     r0, r0, #0x00000004
    mcr     p15, #0, r0, c1, c0, #0
    bx      lr
/* enable the instruction cache: set SCTLR.I (bit 12) */
.globl rt_hw_cpu_icache_enable
rt_hw_cpu_icache_enable:
    mrc     p15, #0, r0, c1, c0, #0
    orr     r0, r0, #0x00001000
    mcr     p15, #0, r0, c1, c0, #0
    bx      lr
/* masks for the way / set fields decoded from CCSIDR */
_FLD_MAX_WAY:
    .word 0x3ff
_FLD_MAX_IDX:
    .word 0x7ff

/* Clean and invalidate the entire data cache by set/way, walking every
 * cache level up to the Level of Coherency reported by CLIDR.
 * Uses r0-r11 internally (r4-r11 saved/restored). */
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
    push    {r4-r11}
    dmb
    mrc     p15, #1, r0, c0, c0, #1     @ read clid register
    ands    r3, r0, #0x7000000          @ get level of coherency
    mov     r3, r3, lsr #23             @ r3 = LoC * 2 (level counter steps by 2)
    beq     finished
    mov     r10, #0                     @ r10 = current cache level * 2
loop1:
    add     r2, r10, r10, lsr #1        @ r2 = 3 * level (CLIDR ctype field shift)
    mov     r1, r0, lsr r2
    and     r1, r1, #7                  @ cache type at this level
    cmp     r1, #2
    blt     skip                        @ no data cache at this level
    mcr     p15, #2, r10, c0, c0, #0    @ select level/data cache in CSSELR
    isb
    mrc     p15, #1, r1, c0, c0, #0     @ read the selected CCSIDR
    and     r2, r1, #7
    add     r2, r2, #4                  @ r2 = log2(line size in bytes)
    ldr     r4, _FLD_MAX_WAY
    ands    r4, r4, r1, lsr #3          @ r4 = max way number
    clz     r5, r4                      @ r5 = bit shift for the way field
    ldr     r7, _FLD_MAX_IDX
    ands    r7, r7, r1, lsr #13         @ r7 = max set number
loop2:
    mov     r9, r4                      @ restart the way counter for this set
loop3:
    orr     r11, r10, r9, lsl r5        @ compose level | way
    orr     r11, r11, r7, lsl r2        @              | set
    mcr     p15, #0, r11, c7, c14, #2   @ DCCISW: clean+invalidate by set/way
    subs    r9, r9, #1
    bge     loop3
    subs    r7, r7, #1
    bge     loop2
skip:
    add     r10, r10, #2                @ next cache level
    cmp     r3, r10
    bgt     loop1
finished:
    dsb
    isb
    pop     {r4-r11}
    bx      lr
/* disable the data cache: clean+flush first so no dirty lines are
 * stranded, then clear SCTLR.C */
.globl rt_hw_cpu_dcache_disable
rt_hw_cpu_dcache_disable:
    push    {r4-r11, lr}        @ flush routine clobbers r4-r11
    bl      rt_cpu_dcache_clean_flush
    mrc     p15, #0, r0, c1, c0, #0
    bic     r0, r0, #0x00000004
    mcr     p15, #0, r0, c1, c0, #0
    pop     {r4-r11, lr}
    bx      lr
/* disable the instruction cache: clear SCTLR.I */
.globl rt_hw_cpu_icache_disable
rt_hw_cpu_icache_disable:
    mrc     p15, #0, r0, c1, c0, #0
    bic     r0, r0, #0x00001000
    mcr     p15, #0, r0, c1, c0, #0
    bx      lr
/* disable the MMU: invalidate the TLB, then clear SCTLR.M */
.globl rt_cpu_mmu_disable
rt_cpu_mmu_disable:
    mcr     p15, #0, r0, c8, c7, #0     @ invalidate tlb
    mrc     p15, #0, r0, c1, c0, #0
    bic     r0, r0, #1
    mcr     p15, #0, r0, c1, c0, #0     @ clear mmu bit
    dsb
    bx      lr
/* enable the MMU: set SCTLR.M (translation table must already be set
 * via rt_cpu_tlb_set) */
.globl rt_cpu_mmu_enable
rt_cpu_mmu_enable:
    mrc     p15, #0, r0, c1, c0, #0
    orr     r0, r0, #0x001
    mcr     p15, #0, r0, c1, c0, #0     @ set mmu enable bit
    dsb
    bx      lr
/* void rt_cpu_tlb_set(base) - program TTBR0 (CP15 c2,c0,0) with the
 * translation table base address in r0 */
.globl rt_cpu_tlb_set
rt_cpu_tlb_set:
    mcr     p15, #0, r0, c2, c0, #0
    dmb
    bx      lr
|
vandercookking/h7_device_RTT
| 7,549
|
rt-thread/libcpu/arm/cortex-r4/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-01-20 Bernard first version
* 2011-07-22 Bernard added thumb mode porting
* 2013-05-24 Grissiom port to CCS
* 2013-05-26 Grissiom optimize for ARMv7
* 2013-10-20 Grissiom port to GCC
*/
#include <rtconfig.h>
.text
.arm

/* symbols shared with the scheduler / interrupt framework */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_hw_trap_irq

/*
 * rt_base_t rt_hw_interrupt_disable()
 * Returns the pre-mask CPSR in r0; pass it back to
 * rt_hw_interrupt_enable() to restore the previous state.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    MRS     r0, cpsr
    CPSID   IF                  @ mask both IRQ and FIQ
    BX      lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the CPSR control bits from a value previously returned by
 * rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    MSR     cpsr_c, r0
    BX      lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * r0 --> from (address of the from-thread's saved-sp slot)
 * r1 --> to   (address of the to-thread's saved-sp slot)
 * Frame layout, top of stack first:
 *   [fpexc, (fpscr, d0-d15 if FPEXC.EN was set)] cpsr, r0-r12, lr, pc
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    STMDB   sp!, {lr}           @ push pc (lr should be pushed in place of PC)
    STMDB   sp!, {r0-r12, lr}   @ push lr & register file
    MRS     r4, cpsr
    TST     lr, #0x01
    ORRNE   r4, r4, #0x20       @ it's thumb code
    STMDB   sp!, {r4}           @ push cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r4, fpexc
    TST     r4, #0x40000000     @ FPEXC.EN set: VFP context is live
    BEQ     __no_vfp_frame1
    VSTMDB  sp!, {d0-d15}
    VMRS    r5, fpscr
    @ TODO: add support for Common VFPv3.
    @ Save registers like FPINST, FPINST2
    STMDB   sp!, {r5}
__no_vfp_frame1:
    STMDB   sp!, {r4}           @ fpexc is saved unconditionally
#endif
    STR     sp, [r0]            @ store sp in preempted tasks TCB
    LDR     sp, [r1]            @ get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0           @ restore fpexc
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame2
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame2:
#endif
    LDMIA   sp!, {r4}           @ pop new task cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * r0 --> to (address of the to-thread's saved-sp slot)
 * First switch into a thread: restore-only version of
 * rt_hw_context_switch, same frame layout.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    LDR     sp, [r0]            @ get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000     @ VFP frame present only if FPEXC.EN was set
    BEQ     __no_vfp_frame_to
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_to:
#endif
    LDMIA   sp!, {r4}           @ pop new task cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)@
 * Called from interrupt context: records the pending switch so the IRQ
 * exit path performs it.  Only the first request in a nest records
 * "from"; later requests just update "to".
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch           @ switch already pending: keep original "from"
    MOV     r3, #1              @ set rt_thread_switch_interrupt_flag to 1
    STR     r3, [r2]
    LDR     r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    STR     r0, [r2]
_reswitch:
    LDR     r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    STR     r1, [r2]
    BX      lr
/* IRQ entry: save the interrupted context (plus a lazy VFP frame when
 * enabled), run the C interrupt framework, then either resume the
 * interrupted thread or divert into the interrupt context switch. */
.globl IRQ_Handler
IRQ_Handler:
    STMDB   sp!, {r0-r12,lr}
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r0, fpexc
    TST     r0, #0x40000000     @ FPEXC.EN: VFP context live?
    BEQ     __no_vfp_frame_str_irq
    VSTMDB  sp!, {d0-d15}
    VMRS    r1, fpscr
    @ TODO: add support for Common VFPv3.
    @ Save registers like FPINST, FPINST2
    STMDB   sp!, {r1}
__no_vfp_frame_str_irq:
    STMDB   sp!, {r0}           @ fpexc saved unconditionally
#endif
    BL      rt_interrupt_enter
    BL      rt_hw_trap_irq
    BL      rt_interrupt_leave
    @ if rt_thread_switch_interrupt_flag set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CMP     r1, #1
    BEQ     rt_hw_context_switch_interrupt_do
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_ldr_irq
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_ldr_irq:
#endif
    LDMIA   sp!, {r0-r12,lr}
    SUBS    pc, lr, #4          @ return (lr_irq = interrupted pc + 4)
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 * Entered from IRQ_Handler (never called from C) with the interrupted
 * thread's context still on the IRQ stack and r0 holding the address of
 * rt_thread_switch_interrupt_flag.  Rebuilds the thread frame used by
 * rt_hw_context_switch on the preempted thread's SVC stack, then
 * restores the "to" thread.
 */
.globl rt_hw_context_switch_interrupt_do
rt_hw_context_switch_interrupt_do:
    MOV     r1, #0              @ clear flag
    STR     r1, [r0]
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000     @ VFP frame present only if FPEXC.EN was set
    BEQ     __no_vfp_frame_do1
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_do1:
#endif
    LDMIA   sp!, {r0-r12,lr}    @ reload saved registers
    STMDB   sp, {r0-r3}         @ save r0-r3. We will restore r0-r3 in the SVC
                                @ mode so there is no need to update SP.
    SUB     r1, sp, #16         @ save the right SP value in r1, so we could restore r0-r3.
    SUB     r2, lr, #4          @ save old task's pc to r2
    MRS     r3, spsr            @ get cpsr of interrupt thread

    @ switch to SVC mode and no interrupt
    CPSID   IF, #0x13

    STMDB   sp!, {r2}           @ push old task's pc
    STMDB   sp!, {r4-r12,lr}    @ push old task's lr,r12-r4
    LDMIA   r1!, {r4-r7}        @ restore r0-r3 of the interrupted thread
    STMDB   sp!, {r4-r7}        @ push old task's r3-r0. We don't need to push/pop them to
                                @ r0-r3 because we just want to transfer the data and don't
                                @ use them here.
    STMDB   sp!, {r3}           @ push old task's cpsr

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    VMRS    r0, fpexc
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_do2
    VSTMDB  sp!, {d0-d15}
    VMRS    r1, fpscr
    @ TODO: add support for Common VFPv3.
    @ Save registers like FPINST, FPINST2
    STMDB   sp!, {r1}
__no_vfp_frame_do2:
    STMDB   sp!, {r0}
#endif

    LDR     r4, =rt_interrupt_from_thread
    LDR     r5, [r4]
    STR     sp, [r5]            @ store sp in preempted tasks's TCB
    LDR     r6, =rt_interrupt_to_thread
    LDR     r6, [r6]
    LDR     sp, [r6]            @ get new task's stack pointer

#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
    LDMIA   sp!, {r0}           @ get fpexc
    VMSR    fpexc, r0
    TST     r0, #0x40000000
    BEQ     __no_vfp_frame_do3
    LDMIA   sp!, {r1}           @ get fpscr
    VMSR    fpscr, r1
    VLDMIA  sp!, {d0-d15}
__no_vfp_frame_do3:
#endif
    LDMIA   sp!, {r4}           @ pop new task's cpsr to spsr
    MSR     spsr_cxsf, r4
    LDMIA   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
|
vandercookking/h7_device_RTT
| 13,962
|
rt-thread/libcpu/arm/cortex-r4/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
@-------------------------------------------------------------------------------
@ sys_core.asm
@
@ (c) Texas Instruments 2009-2013, All rights reserved.
@
#include <rtconfig.h>
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000000
.equ ABT_Stack_Size, 0x00000000
.equ FIQ_Stack_Size, 0x00001000
.equ IRQ_Stack_Size, 0x00001000
.section .bss.noinit
/* stack */
.globl stack_start
.globl stack_top
.align 3
stack_start:
.rept (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + FIQ_Stack_Size + IRQ_Stack_Size)
.byte 0
.endr
stack_top:
.section .text, "ax"
.text
.arm
.globl _c_int00
.globl _reset
@ Reset entry point. The core leaves reset in Supervisor mode with IRQ/FIQ
@ masked and undefined register contents, so every general-purpose register
@ is given a defined value before anything else runs.
_reset:
@-------------------------------------------------------------------------------
@ Initialize CPU Registers
@ After reset, the CPU is in the Supervisor mode (M = 10011)
mov r0, #0x0000
mov r1, #0x0000
mov r2, #0x0000
mov r3, #0x0000
mov r4, #0x0000
mov r5, #0x0000
mov r6, #0x0000
mov r7, #0x0000
mov r8, #0x0000
mov r9, #0x0000
mov r10, #0x0000
mov r11, #0x0000
mov r12, #0x0000
mov r13, #0x0000
mrs r1, cpsr
msr spsr_cxsf, r1 @ give SPSR a defined value too
cpsid if, #19 @ stay in SVC (0b10011) with IRQ/FIQ masked
#if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
@ Turn on FPV coprocessor
mrc p15, #0x00, r2, c1, c0, #0x02 @ CPACR: grant access to cp10/cp11
orr r2, r2, #0xF00000
mcr p15, #0x00, r2, c1, c0, #0x02
fmrx r2, fpexc
orr r2, r2, #0x40000000 @ FPEXC.EN = 1: enable the VFP
fmxr fpexc, r2
#endif
@-------------------------------------------------------------------------------
@ Initialize Stack Pointers
@ Each exception mode gets its slice of the stack area, carved downward
@ from stack_top using the *_Stack_Size equates declared above.
ldr r0, =stack_top
@ Set the startup stack for svc
mov sp, r0
@ Enter Undefined Instruction Mode and set its Stack Pointer
msr cpsr_c, #Mode_UND|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #UND_Stack_Size
@ Enter Abort Mode and set its Stack Pointer
msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #ABT_Stack_Size
@ Enter FIQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #FIQ_Stack_Size
@ Enter IRQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #IRQ_Stack_Size
@ Switch back to SVC
msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
@ A short chain of nested calls before the first real function call
@ (TI startup convention; gives lr/return-stack state defined values).
bl next1
next1:
bl next2
next2:
bl next3
next3:
bl next4
next4:
ldr lr, =_c_int00 @ jump to the C environment entry point; never returns
bx lr
.globl data_init
@ C runtime setup: copy initialized data from its load image to RAM,
@ zero .bss, then run C++ static constructors. Clobbers r0-r3; returns
@ through lr.
data_init:
/* copy .data to SRAM */
ldr r1, =_sidata /* .data start in image */
ldr r2, =_edata /* .data end in image */
ldr r3, =_sdata /* sram data start */
@ NOTE(review): the loop compares the RAM cursor (r3) against _edata, so
@ _edata must resolve to a RAM address despite the "end in image" comment
@ above -- confirm against the linker script.
data_loop:
ldr r0, [r1, #0]
str r0, [r3]
add r1, r1, #4
add r3, r3, #4
cmp r3, r2 /* more data left to copy? */
blo data_loop /* loop until done */
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 /* r2 = next constructor; post-increment cursor */
stmfd sp!, {r0-r3, ip, lr}
mov lr, pc /* classic ARM call sequence: set return address... */
bx r2 /* ...then branch to the constructor */
ldmfd sp!, {r0-r3, ip, lr}
b ctor_loop
ctor_end:
bx lr
@-------------------------------------------------------------------------------
@ Enable RAM ECC Support
.globl _coreEnableRamEcc_
@ Set ACTLR (p15 c1,c0,1) bits [27:26] to enable ECC on the TCM RAMs.
@ r0 is preserved.
_coreEnableRamEcc_:
stmfd sp!, {r0}
mrc p15, #0x00, r0, c1, c0, #0x01 @ read Auxiliary Control Register
orr r0, r0, #0x0C000000
mcr p15, #0x00, r0, c1, c0, #0x01 @ write it back
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Disable RAM ECC Support
.globl _coreDisableRamEcc_
@ Clear ACTLR bits [27:26] to disable ECC on the TCM RAMs. r0 preserved.
_coreDisableRamEcc_:
stmfd sp!, {r0}
mrc p15, #0x00, r0, c1, c0, #0x01
bic r0, r0, #0x0C000000
mcr p15, #0x00, r0, c1, c0, #0x01
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Enable Flash ECC Support
.globl _coreEnableFlashEcc_
@ Set ACTLR bit 25 to enable ECC on the flash (ATCM) interface. r0 preserved.
_coreEnableFlashEcc_:
stmfd sp!, {r0}
mrc p15, #0x00, r0, c1, c0, #0x01
orr r0, r0, #0x02000000
dmb @ complete outstanding memory accesses before ECC turns on
mcr p15, #0x00, r0, c1, c0, #0x01
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Disable Flash ECC Support
.globl _coreDisableFlashEcc_
@ Clear ACTLR bit 25 to disable ECC on the flash (ATCM) interface. r0 preserved.
_coreDisableFlashEcc_:
stmfd sp!, {r0}
mrc p15, #0x00, r0, c1, c0, #0x01
bic r0, r0, #0x02000000
mcr p15, #0x00, r0, c1, c0, #0x01
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get data fault status register
.globl _coreGetDataFault_
@ Return the Data Fault Status Register (DFSR, p15 c5,c0,0) in r0.
_coreGetDataFault_:
mrc p15, #0, r0, c5, c0, #0
bx lr
@-------------------------------------------------------------------------------
@ Clear data fault status register
.globl _coreClearDataFault_
@ Write 0 to the DFSR; r0 is preserved.
_coreClearDataFault_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c5, c0, #0
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get instruction fault status register
.globl _coreGetInstructionFault_
@ Return the Instruction Fault Status Register (IFSR, p15 c5,c0,1) in r0.
_coreGetInstructionFault_:
mrc p15, #0, r0, c5, c0, #1
bx lr
@-------------------------------------------------------------------------------
@ Clear instruction fault status register
.globl _coreClearInstructionFault_
@ Write 0 to the IFSR; r0 is preserved.
_coreClearInstructionFault_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c5, c0, #1
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get data fault address register
.globl _coreGetDataFaultAddress_
@ Return the Data Fault Address Register (DFAR, p15 c6,c0,0) in r0.
_coreGetDataFaultAddress_:
mrc p15, #0, r0, c6, c0, #0
bx lr
@-------------------------------------------------------------------------------
@ Clear data fault address register
.globl _coreClearDataFaultAddress_
@ Write 0 to the DFAR; r0 is preserved.
_coreClearDataFaultAddress_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c6, c0, #0
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get instruction fault address register
.globl _coreGetInstructionFaultAddress_
@ Return the Instruction Fault Address Register (IFAR, p15 c6,c0,2) in r0.
_coreGetInstructionFaultAddress_:
mrc p15, #0, r0, c6, c0, #2
bx lr
@-------------------------------------------------------------------------------
@ Clear instruction fault address register
.globl _coreClearInstructionFaultAddress_
@ Write 0 to the IFAR; r0 is preserved.
_coreClearInstructionFaultAddress_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c6, c0, #2
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get auxiliary data fault status register
.globl _coreGetAuxiliaryDataFault_
@ Return the Auxiliary Data Fault Status Register (ADFSR, p15 c5,c1,0) in r0.
_coreGetAuxiliaryDataFault_:
mrc p15, #0, r0, c5, c1, #0
bx lr
@-------------------------------------------------------------------------------
@ Clear auxiliary data fault status register
.globl _coreClearAuxiliaryDataFault_
@ Write 0 to the ADFSR; r0 is preserved.
_coreClearAuxiliaryDataFault_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c5, c1, #0
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Get auxiliary instruction fault status register
.globl _coreGetAuxiliaryInstructionFault_
@ Return the Auxiliary Instruction Fault Status Register (AIFSR, p15 c5,c1,1) in r0.
_coreGetAuxiliaryInstructionFault_:
mrc p15, #0, r0, c5, c1, #1
bx lr
@-------------------------------------------------------------------------------
@ Clear auxiliary instruction fault status register
.globl _coreClearAuxiliaryInstructionFault_
@ Write 0 to the Auxiliary Instruction Fault Status Register
@ (AIFSR, p15 c5,c1,1) to clear it. r0 is preserved.
_coreClearAuxiliaryInstructionFault_:
stmfd sp!, {r0}
mov r0, #0
mcr p15, #0, r0, c5, c1, #1 @ was MRC (a read) -- the register was never cleared
ldmfd sp!, {r0}
bx lr
@-------------------------------------------------------------------------------
@ Clear ESM CCM errors
.globl _esmCcmErrorsClear_
@ Clear the CCM-R4 compare-error state left over from the reset self-test:
@ ESM status groups 1 and 2, the shadow status register, the pending VIM
@ interrupt, and the CCM-R4 status register. r0-r2 are preserved.
@ Register addresses and clear masks live in the literal pool below.
_esmCcmErrorsClear_:
stmfd sp!, {r0-r2}
ldr r0, ESMSR1_REG @ load the ESMSR1 status register address
ldr r2, ESMSR1_ERR_CLR
str r2, [r0] @ clear the ESMSR1 register
ldr r0, ESMSR2_REG @ load the ESMSR2 status register address
ldr r2, ESMSR2_ERR_CLR
str r2, [r0] @ clear the ESMSR2 register
ldr r0, ESMSSR2_REG @ load the ESMSSR2 status register address
ldr r2, ESMSSR2_ERR_CLR
str r2, [r0] @ clear the ESMSSR2 register
ldr r0, ESMKEY_REG @ load the ESMKEY register address
mov r2, #0x5 @ load R2 with 0x5
str r2, [r0] @ clear the ESMKEY register
ldr r0, VIM_INTREQ @ load the INTREQ register address
ldr r2, VIM_INT_CLR
str r2, [r0] @ clear the INTREQ register
ldr r0, CCMR4_STAT_REG @ load the CCMR4 status register address
ldr r2, CCMR4_ERR_CLR
str r2, [r0] @ clear the CCMR4 status register
ldmfd sp!, {r0-r2}
bx lr
ESMSR1_REG: .word 0xFFFFF518
ESMSR2_REG: .word 0xFFFFF51C
ESMSR3_REG: .word 0xFFFFF520
ESMKEY_REG: .word 0xFFFFF538
ESMSSR2_REG: .word 0xFFFFF53C
CCMR4_STAT_REG: .word 0xFFFFF600
ERR_CLR_WRD: .word 0xFFFFFFFF
CCMR4_ERR_CLR: .word 0x00010000
ESMSR1_ERR_CLR: .word 0x80000000
ESMSR2_ERR_CLR: .word 0x00000004
ESMSSR2_ERR_CLR: .word 0x00000004
VIM_INT_CLR: .word 0x00000001
VIM_INTREQ: .word 0xFFFFFE20
@-------------------------------------------------------------------------------
@ Work Around for Errata CORTEX-R4#57:
@
@ Errata Description:
@ Conditional VMRS APSR_Nzcv, FPSCR May Evaluate With Incorrect Flags
@ Workaround:
@ Disable out-of-order single-precision floating point
@ multiply-accumulate instruction completion
.globl _errata_CORTEXR4_57_
@ Apply the errata workaround described above: set SACTLR.DOOFMACS to
@ disable out-of-order single-precision FMAC completion. r0 preserved.
_errata_CORTEXR4_57_:
push {r0}
mrc p15, #0, r0, c15, c0, #0 @ Read Secondary Auxiliary Control Register
orr r0, r0, #0x10000 @ Set BIT 16 (Set DOOFMACS)
mcr p15, #0, r0, c15, c0, #0 @ Write Secondary Auxiliary Control Register
pop {r0}
bx lr
@-------------------------------------------------------------------------------
@ Work Around for Errata CORTEX-R4#66:
@
@ Errata Description:
@ Register Corruption During A Load-Multiple Instruction At
@ an Exception Vector
@ Workaround:
@ Disable out-of-order completion for divide instructions in
@ Auxiliary Control register
.globl _errata_CORTEXR4_66_
@ Apply the errata workaround described above: disable out-of-order
@ completion for divide instructions (ACTLR bit 7). r0 preserved.
_errata_CORTEXR4_66_:
push {r0}
mrc p15, #0, r0, c1, c0, #1 @ Read Auxiliary Control register
orr r0, r0, #0x80 @ Set BIT 7 (Disable out-of-order completion
@ for divide instructions.)
mcr p15, #0, r0, c1, c0, #1 @ Write Auxiliary Control register
pop {r0}
bx lr
.globl turnon_VFP
@ Lazily enable the VFP by setting FPEXC.EN, then perform an exception
@ return that re-executes the instruction that trapped.
@ NOTE(review): the "subs pc, lr, #4" return implies this is installed on
@ the undefined-instruction vector (lr_und - 4 = the faulting instruction)
@ -- confirm against the vector table.
turnon_VFP:
@ Enable FPV
STMDB sp!, {r0}
fmrx r0, fpexc
orr r0, r0, #0x40000000 @ FPEXC.EN = 1
fmxr fpexc, r0
LDMIA sp!, {r0}
subs pc, lr, #4 @ exception return: retry the faulting instruction
.macro push_svc_reg
@ Build a struct rt_hw_exp_stack (17 words: r0-r12, sp, lr, pc, cpsr) on
@ the current exception stack and leave r0 pointing at it for the C trap
@ handler. Switches to SVC mode so the banked sp/lr of the interrupted
@ context can be captured. Clobbers r6 (already saved in the frame).
sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */
stmia sp, {r0 - r12} @/* Calling r0-r12 */
mov r0, sp
mrs r6, spsr @/* Save CPSR */
str lr, [r0, #15*4] @/* Push PC */
str r6, [r0, #16*4] @/* Push CPSR */
cps #Mode_SVC
str sp, [r0, #13*4] @/* Save calling SP */
str lr, [r0, #14*4] @/* Save calling PC */
.endm
.globl vector_svc
@ Exception vector stubs: each captures the full register context with
@ push_svc_reg (r0 = context) and calls the matching C trap handler.
@ The handlers are not expected to return; each stub parks in "b .".
vector_svc:
push_svc_reg
bl rt_hw_trap_svc
b .
.globl vector_pabort
vector_pabort:
push_svc_reg
bl rt_hw_trap_pabt
b .
.globl vector_dabort
vector_dabort:
push_svc_reg
bl rt_hw_trap_dabt
b .
.globl vector_resv
vector_resv:
push_svc_reg
bl rt_hw_trap_resv
b .
|
vandercookking/h7_device_RTT
| 7,011
|
rt-thread/libcpu/arm/cortex-m4/context_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27 Bernard add protect when contex switch occurs
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV performs the actual context switch at the lowest priority.
; Hardware stacks psr, pc, lr, r12, r3-r0; this handler stacks the
; software frame ([flag,] [d8-d15,] r4-r11) on the from-thread's PSP.
EXPORT PendSV_Handler
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
#if defined ( __ARMVFP__ )
TST lr, #0x10 ; EXC_RETURN[4] == 0 -> thread used the FPU
BNE skip_push_fpu
VSTMDB r1!, {d8 - d15} ; push FPU register s16~s31
skip_push_fpu
#endif
STMFD r1!, {r4 - r11} ; push r4 - r11 register
#if defined ( __ARMVFP__ )
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
BNE push_flag
MOV r4, #0x01 ; flag = 1
push_flag
;STMFD r1!, {r4} ; push flag
SUB r1, r1, #0x04
STR r4, [r1]
#endif
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
#if defined ( __ARMVFP__ )
LDMFD r1!, {r3} ; pop flag
#endif
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
#if defined ( __ARMVFP__ )
CBZ r3, skip_pop_fpu ; flag == 0 -> no FPU frame was stacked
VLDMIA r1!, {d8 - d15} ; pop FPU register s16~s31
skip_pop_fpu
#endif
MSR psp, r1 ; update stack pointer
#if defined ( __ARMVFP__ )
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CBZ r3, return_without_fpu ; if(flag_r3 != 0)
BIC lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
return_without_fpu
#endif
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; EXC_RETURN[2] = 1: return to thread mode on PSP
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined ( __ARMVFP__ )
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
#endif
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _get_sp_done
MRS r0, psp ; get fault context from thread.
_get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
;STMFD r0!, {lr} ; push exec_return register
#if defined ( __ARMVFP__ )
SUB r0, r0, #0x04 ; push dummy for flag
STR lr, [r0]
#endif
SUB r0, r0, #0x04
STR lr, [r0]
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _update_msp
MSR psp, r0 ; update stack pointer to PSP.
B _update_done
_update_msp
MSR msp, r0 ; update stack pointer to MSP.
_update_done
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
END
|
vandercookking/h7_device_RTT
| 7,122
|
rt-thread/libcpu/arm/cortex-m4/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard first version
* 2012-01-01 aozima support context switch load/store FPU register.
* 2013-06-18 aozima add restore MSP feature.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhancement hard fault exception handler.
*/
/**
* @addtogroup cortex-m4
*/
/*@{*/
.cpu cortex-m4
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.global rt_hw_interrupt_disable
.type   rt_hw_interrupt_disable, %function
/*
 * rt_base_t rt_hw_interrupt_disable(void)
 * Mask all configurable interrupts and hand back the previous PRIMASK
 * value so the caller can restore it with rt_hw_interrupt_enable().
 */
rt_hw_interrupt_disable:
    mrs     r0, primask             /* r0 = current interrupt mask (return value) */
    cpsid   i                       /* PRIMASK = 1: mask configurable interrupts */
    bx      lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.global rt_hw_interrupt_enable
.type   rt_hw_interrupt_enable, %function
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restore the PRIMASK value previously returned by
 * rt_hw_interrupt_disable(); interrupts become enabled again only if
 * they were enabled when that level was captured.
 */
rt_hw_interrupt_enable:
    msr     primask, r0             /* write the saved mask back */
    bx      lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
/* Both entry points only record the from/to threads and pend PendSV;
 * the actual register save/restore happens in PendSV_Handler. If a
 * switch is already pending, only the destination thread is updated. */
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR r0, [r2]
_reswitch:
LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
/* r0 --> switch from thread stack
* r1 --> switch to thread stack
* psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
*/
.global PendSV_Handler
.type PendSV_Handler, %function
/* PendSV performs the actual context switch at the lowest priority.
 * Hardware stacks psr, pc, lr, r12, r3-r0; this handler stacks the
 * software frame ([flag,] [d8-d15,] r4-r11) on the from-thread's PSP. */
PendSV_Handler:
/* disable interrupt to protect context switch */
MRS r2, PRIMASK
CPSID I
/* get rt_thread_switch_interrupt_flag */
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit /* pendsv already handled */
/* clear rt_thread_switch_interrupt_flag to 0 */
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread /* skip register save at the first time */
MRS r1, psp /* get from thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
TST lr, #0x10 /* EXC_RETURN[4] == 0 -> thread used the FPU */
IT EQ
VSTMDBEQ r1!, {d8 - d15} /* push FPU register s16~s31 */
#endif
STMFD r1!, {r4 - r11} /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
MOV r4, #0x00 /* flag = 0 */
TST lr, #0x10 /* if(!EXC_RETURN[4]) */
IT EQ
MOVEQ r4, #0x01 /* flag = 1 */
STMFD r1!, {r4} /* push flag */
#endif
LDR r0, [r0]
STR r1, [r0] /* update from thread stack pointer */
switch_to_thread:
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] /* load thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
LDMFD r1!, {r3} /* pop flag */
#endif
LDMFD r1!, {r4 - r11} /* pop r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
CMP r3, #0 /* flag != 0 -> an FPU frame was stacked */
IT NE
VLDMIANE r1!, {d8 - d15} /* pop FPU register s16~s31 */
#endif
MSR psp, r1 /* update stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ORR lr, lr, #0x10 /* lr |= (1 << 4), clean FPCA. */
CMP r3, #0 /* if(flag_r3 != 0) */
IT NE
BICNE lr, lr, #0x10 /* lr &= ~(1 << 4), set FPCA. */
#endif
pendsv_exit:
/* restore interrupt */
MSR PRIMASK, r2
ORR lr, lr, #0x04 /* EXC_RETURN[2] = 1: return to thread mode on PSP */
BX lr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
/* Starts the first thread: records it as the "to" thread with no "from"
 * thread, pends PendSV, resets MSP from vector-table entry 0 and unmasks
 * interrupts so PendSV can fire. Never returns to the caller. */
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* CLEAR CONTROL.FPCA */
MRS r2, CONTROL /* read */
BIC r2, #0x04 /* modify */
MSR CONTROL, r2 /* write-back */
#endif
/* set from thread to 0: tells PendSV there is no context to save */
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
/* set interrupt flag to 1 */
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
/* set the PendSV and SysTick exception priority (lowest) */
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] /* read */
ORR r1,r1,r2 /* modify */
STR r1, [r0] /* write-back */
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
/* restore MSP from the vector table's initial stack pointer entry */
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
/* enable interrupts at processor level */
CPSIE F
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
/* Kept only for API compatibility with older RT-Thread versions; no-op. */
rt_hw_interrupt_thread_switch:
BX lr
NOP
.global HardFault_Handler
.type HardFault_Handler, %function
/* Hard fault entry: extends the hardware-stacked frame on the faulting
 * stack with exec_return, [dummy flag,] r4-r11 and calls the C handler
 * with the updated stack pointer. EXC_RETURN[2] selects MSP vs PSP. */
HardFault_Handler:
/* get current context */
MRS r0, msp /* get fault context from handler. */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _get_sp_done
MRS r0, psp /* get fault context from thread. */
_get_sp_done:
STMFD r0!, {r4 - r11} /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
STMFD r0!, {lr} /* push dummy for flag */
#endif
STMFD r0!, {lr} /* push exec_return register */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _update_msp
MSR psp, r0 /* update stack pointer to PSP. */
B _update_done
_update_msp:
MSR msp, r0 /* update stack pointer to MSP. */
_update_done:
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {LR}
ORR lr, lr, #0x04
BX lr
|
vandercookking/h7_device_RTT
| 6,982
|
rt-thread/libcpu/arm/cortex-m4/context_rvds.S
|
;/*
;* Copyright (c) 2006-2018, RT-Thread Development Team
;*
;* SPDX-License-Identifier: Apache-2.0
;*
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version.
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK
CPSID I
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
PendSV_Handler PROC
EXPORT PendSV_Handler
; PendSV performs the actual context switch at the lowest priority.
; Hardware stacks psr, pc, lr, r12, r3-r0; this handler stacks the
; software frame ([flag,] [d8-d15,] r4-r11) on the from-thread's PSP.
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
IF {FPU} != "SoftVFP"
TST lr, #0x10 ; EXC_RETURN[4] == 0 -> thread used the FPU
VSTMFDEQ r1!, {d8 - d15} ; push FPU register s16~s31
ENDIF
STMFD r1!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
MOVEQ r4, #0x01 ; flag = 1
STMFD r1!, {r4} ; push flag
ENDIF
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
IF {FPU} != "SoftVFP"
LDMFD r1!, {r3} ; pop flag
ENDIF
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
IF {FPU} != "SoftVFP"
CMP r3, #0 ; flag != 0 -> an FPU frame was stacked
VLDMFDNE r1!, {d8 - d15} ; pop FPU register s16~s31
ENDIF
MSR psp, r1 ; update stack pointer
IF {FPU} != "SoftVFP"
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CMP r3, #0 ; if(flag_r3 != 0)
BICNE lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
ENDIF
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; EXC_RETURN[2] = 1: return to thread mode on PSP
BX lr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this fucntion is used to perform the first thread switch
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
IF {FPU} != "SoftVFP"
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
ENDIF
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler PROC
; get current context
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MRSEQ r0, msp ; [2]=0 ==> Z=1, get fault context from handler.
MRSNE r0, psp ; [2]=1 ==> Z=0, get fault context from thread.
STMFD r0!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
STMFD r0!, {lr} ; push dummy for flag
ENDIF
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MSREQ msp, r0 ; [2]=0 ==> Z=1, update stack pointer to MSP.
MSRNE psp, r0 ; [2]=1 ==> Z=0, update stack pointer to PSP.
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 5,779
|
rt-thread/libcpu/arm/cortex-m0/context_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2010-01-25 Bernard first version
; * 2012-06-01 aozima set pendsv priority to 0xFF.
; * 2012-08-17 aozima fixed bug: store r8 - r11.
; * 2013-06-18 aozima add restore MSP feature.
; */
;/**
; * @addtogroup CORTEX-M0
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SHPR3 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOVS r3, #0x1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV performs the actual context switch. Cortex-M0 has no STM of the
; high registers, so r8-r11 are copied through r4-r7 in two batches.
; Frame layout on the thread stack: r4-r7, r8-r11 (below the
; hardware-stacked psr, pc, lr, r12, r3-r0).
EXPORT PendSV_Handler
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #0x00
BEQ pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOVS r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CMP r1, #0x00
BEQ switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
SUBS r1, r1, #0x20 ; space for {r4 - r7} and {r8 - r11}
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
STMIA r1!, {r4 - r7} ; push thread {r4 - r7} register to thread stack
MOV r4, r8 ; mov thread {r8 - r11} to {r4 - r7}
MOV r5, r9
MOV r6, r10
MOV r7, r11
STMIA r1!, {r4 - r7} ; push thread {r8 - r11} high register to thread stack
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMIA r1!, {r4 - r7} ; pop thread {r4 - r7} register from thread stack
PUSH {r4 - r7} ; push {r4 - r7} to MSP for copy {r8 - r11}
LDMIA r1!, {r4 - r7} ; pop thread {r8 - r11} high register from thread stack to {r4 - r7}
MOV r8, r4 ; mov {r4 - r7} to {r8 - r11}
MOV r9, r5
MOV r10, r6
MOV r11, r7
POP {r4 - r7} ; pop {r4 - r7} from MSP
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
; build EXC_RETURN = 0 - 4 = 0xFFFFFFFC and return through it.
; NOTE(review): ARMv6-M documents the valid EXC_RETURN values as
; 0xFFFFFFF1/F9/FD (thread+PSP = 0xFFFFFFFD); this computes 0xFFFFFFFC --
; confirm the intended value against hardware behavior.
MOVS r0, #0x04
RSBS r0, r0, #0x00
BX r0
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this fucntion is used to perform the first thread switch
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOVS r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOVS r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SHPR3
LDR r1, =NVIC_PENDSV_PRI
LDR r2, [r0,#0x00] ; read
ORRS r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
NOP
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, psp ; get fault thread stack pointer
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {pc}
END
|
vandercookking/h7_device_RTT
| 6,278
|
rt-thread/libcpu/arm/cortex-m0/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-01-25 Bernard first version
* 2012-06-01 aozima set pendsv priority to 0xFF.
* 2012-08-17 aozima fixed bug: store r8 - r11.
* 2013-02-20 aozima port to gcc.
* 2013-06-18 aozima add restore MSP feature.
* 2013-11-04 bright fixed hardfault bug for gcc.
*/
.cpu cortex-m0
.fpu softvfp
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SHPR3, 0xE000ED20 /* system priority register (3) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
/*
 * rt_base_t rt_hw_interrupt_disable(void);
 * Returns the current PRIMASK in R0 (the "level" handed back to
 * rt_hw_interrupt_enable) and then masks all configurable-priority
 * exceptions. Reading before masking makes nested disable/enable safe.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
MRS R0, PRIMASK /* R0 = caller's PRIMASK (returned as saved level) */
CPSID I /* PRIMASK = 1: disable maskable interrupts */
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores PRIMASK to the value previously returned by
 * rt_hw_interrupt_disable(); interrupts re-enable only if they
 * were enabled when the matching disable was taken.
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
MSR PRIMASK, R0 /* restore saved interrupt state */
BX LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* R0 --> from
* R1 --> to
*/
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * R0 = &from_thread->sp, R1 = &to_thread->sp.
 * Records the switch request and pends PendSV; the actual register
 * save/restore happens in PendSV_Handler at the lowest exception
 * priority. Interrupt and thread variants share one implementation.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch /* a switch is already pending: keep the original "from", only retarget "to" */
MOVS R3, #1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR R1, [R2]
LDR R0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR R1, =NVIC_PENDSVSET
STR R1, [R0]
BX LR
/* R0 --> switch from thread stack
* R1 --> switch to thread stack
* psr, pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack
*/
/*
 * PendSV_Handler — performs the deferred context switch.
 * Hardware has already stacked psr, pc, LR, R12, R3-R0 on the "from"
 * thread's PSP; this handler saves/restores only R4-R11 manually
 * (Cortex-M0 STM/LDM cannot touch high registers directly, hence the
 * MOV shuffles through R4-R7).
 */
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
/* disable interrupt to protect context switch */
MRS R2, PRIMASK
CPSID I
/* get rt_thread_switch_interrupt_flag */
LDR R0, =rt_thread_switch_interrupt_flag
LDR R1, [R0]
CMP R1, #0x00
BEQ pendsv_exit /* pendsv already handled */
/* clear rt_thread_switch_interrupt_flag to 0 */
MOVS R1, #0
STR R1, [R0]
LDR R0, =rt_interrupt_from_thread
LDR R1, [R0]
CMP R1, #0x00
BEQ switch_to_thread /* skip register save at the first time (from == NULL) */
MRS R1, PSP /* get from thread stack pointer */
SUBS R1, R1, #0x20 /* space for {R4 - R7} and {R8 - R11}: 8 words */
LDR R0, [R0]
STR R1, [R0] /* update from thread stack pointer */
STMIA R1!, {R4 - R7} /* push thread {R4 - R7} register to thread stack */
MOV R4, R8 /* mov thread {R8 - R11} to {R4 - R7} */
MOV R5, R9
MOV R6, R10
MOV R7, R11
STMIA R1!, {R4 - R7} /* push thread {R8 - R11} high register to thread stack */
switch_to_thread:
LDR R1, =rt_interrupt_to_thread
LDR R1, [R1]
LDR R1, [R1] /* load thread stack pointer */
LDMIA R1!, {R4 - R7} /* pop thread {R4 - R7} register from thread stack */
PUSH {R4 - R7} /* push {R4 - R7} to MSP for copy {R8 - R11} */
LDMIA R1!, {R4 - R7} /* pop thread {R8 - R11} high register from thread stack to {R4 - R7} */
MOV R8, R4 /* mov {R4 - R7} to {R8 - R11} */
MOV R9, R5
MOV R10, R6
MOV R11, R7
POP {R4 - R7} /* pop {R4 - R7} from MSP */
MSR PSP, R1 /* update stack pointer */
pendsv_exit:
/* restore interrupt */
MSR PRIMASK, R2
MOVS R0, #0x04
RSBS R0, R0, #0x00 /* R0 = -4 = 0xFFFFFFFC */
BX R0 /* exception return to thread context; NOTE(review): canonical thread/PSP EXC_RETURN is 0xFFFFFFFD -- confirm against the ARMv6-M ARM */
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* R0 --> to
*/
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * R0 = &to_thread->sp. Performs the very first thread switch: clears
 * "from" so PendSV skips the register save, gives PendSV/SysTick the
 * lowest priority, pends PendSV, rewinds MSP to the initial stack
 * (vector table entry 0) and enables interrupts. Control never
 * returns here; PendSV takes over and starts the thread.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
LDR R1, =rt_interrupt_to_thread
STR R0, [R1]
/* set from thread to 0 */
LDR R1, =rt_interrupt_from_thread
MOVS R0, #0
STR R0, [R1]
/* set interrupt flag to 1 */
LDR R1, =rt_thread_switch_interrupt_flag
MOVS R0, #1
STR R0, [R1]
/* set the PendSV and SysTick exception priority (lowest) */
LDR R0, =NVIC_SHPR3
LDR R1, =NVIC_PENDSV_PRI
LDR R2, [R0,#0x00] /* read */
ORRS R1, R1, R2 /* modify */
STR R1, [R0] /* write-back */
LDR R0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR R1, =NVIC_PENDSVSET
STR R1, [R0]
NOP
/* restore MSP: reload the initial stack pointer from vector table entry 0 */
LDR R0, =SCB_VTOR
LDR R0, [R0]
LDR R0, [R0]
NOP
MSR MSP, R0
/* enable interrupts at processor level */
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version: kept only so legacy callers still link;
 * the real switch is done by PendSV, so this is a no-op. */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
BX LR
NOP
/*
 * HardFault_Handler — forwards the fault to the C handler.
 * Passes the faulting thread's PSP (pointing at the hardware-stacked
 * exception frame) as the first argument. Returning via the saved LR
 * (an EXC_RETURN value) only works if rt_hw_hard_fault_exception
 * returns at all.
 */
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
/* get current context */
MRS R0, PSP /* get fault thread stack pointer */
PUSH {LR} /* save EXC_RETURN across the C call */
BL rt_hw_hard_fault_exception
POP {PC} /* resume via the saved EXC_RETURN */
/*
* rt_uint32_t rt_hw_interrupt_check(void);
* R0 --> state
*/
/*
 * rt_uint32_t rt_hw_interrupt_check(void);
 * Returns the IPSR exception number: 0 means thread mode,
 * non-zero means the caller is running inside a handler.
 */
.global rt_hw_interrupt_check
.type rt_hw_interrupt_check, %function
rt_hw_interrupt_check:
MRS R0, IPSR
BX LR
|
vandercookking/h7_device_RTT
| 5,903
|
rt-thread/libcpu/arm/cortex-m0/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2010-01-25 Bernard first version
; * 2012-06-01 aozima set pendsv priority to 0xFF.
; * 2012-08-17 aozima fixed bug: store r8 - r11.
; * 2013-06-18 aozima add restore MSP feature.
; */
;/**
; * @addtogroup CORTEX-M0
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SHPR3 EQU 0xE000ED20 ; system priority register (3)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
; rt_base_t rt_hw_interrupt_disable();
; Returns the current PRIMASK in r0 (the saved level) and then masks
; all configurable-priority exceptions.
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK ; r0 = caller's PRIMASK (returned as saved level)
CPSID I ; PRIMASK = 1: disable maskable interrupts
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
; void rt_hw_interrupt_enable(rt_base_t level);
; Restores PRIMASK to the value rt_hw_interrupt_disable returned,
; so nested disable/enable pairs behave correctly.
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0 ; restore saved interrupt state
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; r0 = &from_thread->sp, r1 = &to_thread->sp.
; Records the request and pends PendSV; the register save/restore is
; deferred to PendSV_Handler. Interrupt/thread variants are identical.
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: keep original "from", retarget "to" only
MOVS r3, #0x01
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV_Handler — performs the deferred context switch.
; Hardware has stacked psr, pc, lr, r12, r3-r0 on the "from" PSP;
; only r4-r11 are saved/restored here. Cortex-M0 STM/LDM cannot access
; high registers, hence the MOV shuffles through r4-r7.
PendSV_Handler PROC
EXPORT PendSV_Handler
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #0x00
BEQ pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOVS r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CMP r1, #0x00
BEQ switch_to_thread ; skip register save at the first time (from == NULL)
MRS r1, psp ; get from thread stack pointer
SUBS r1, r1, #0x20 ; space for {r4 - r7} and {r8 - r11}: 8 words
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
STMIA r1!, {r4 - r7} ; push thread {r4 - r7} register to thread stack
MOV r4, r8 ; mov thread {r8 - r11} to {r4 - r7}
MOV r5, r9
MOV r6, r10
MOV r7, r11
STMIA r1!, {r4 - r7} ; push thread {r8 - r11} high register to thread stack
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMIA r1!, {r4 - r7} ; pop thread {r4 - r7} register from thread stack
PUSH {r4 - r7} ; push {r4 - r7} to MSP for copy {r8 - r11}
LDMIA r1!, {r4 - r7} ; pop thread {r8 - r11} high register from thread stack to {r4 - r7}
MOV r8, r4 ; mov {r4 - r7} to {r8 - r11}
MOV r9, r5
MOV r10, r6
MOV r11, r7
POP {r4 - r7} ; pop {r4 - r7} from MSP
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
MOVS r0, #0x04
RSBS r0, r0, #0x00 ; r0 = -4 = 0xFFFFFFFC
BX r0 ; exception return to thread context; NOTE(review): canonical thread/PSP EXC_RETURN is 0xFFFFFFFD -- confirm against the ARMv6-M ARM
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
; void rt_hw_context_switch_to(rt_uint32 to);
; r0 = &to_thread->sp. First thread switch: clears "from" so PendSV
; skips the save, sets PendSV/SysTick to lowest priority, pends PendSV,
; rewinds MSP to the initial stack (vector table entry 0) and enables
; interrupts. PendSV takes over; control never returns here.
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOVS r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOVS r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority (lowest)
LDR r0, =NVIC_SHPR3
LDR r1, =NVIC_PENDSV_PRI
LDR r2, [r0,#0x00] ; read
ORRS r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP: reload the initial stack pointer from vector table entry 0
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
; Kept only for link compatibility with old code; the real switch is
; done by PendSV, so this is a no-op.
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
HardFault_Handler PROC
EXPORT HardFault_Handler
; get current context
MRS r0, psp ; get fault thread stack pointer
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {pc}
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 6,016
|
rt-thread/libcpu/arm/cortex-a/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
#include "rtconfig.h"
.section .text, "ax"
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = caller's CPSR, returned as the saved level
cpsid i @ mask IRQ (FIQ state is left unchanged)
bx lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0 @ restore the CPSR saved by rt_hw_interrupt_disable
bx lr
/*
* void rt_hw_context_switch_to(rt_uint32 to, struct rt_thread *to_thread);
* r0 --> to (thread stack)
* r1 --> to_thread
*/
/*
 * First-ever switch: adopt the new thread's saved stack and jump to
 * the common restore path; nothing of the current context is saved.
 * r0 = &to_thread->sp, r1 = to_thread (SMP builds only).
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
clrex @ clear the local exclusive monitor (stale LDREX state)
ldr sp, [r0] @ get new task stack pointer
#ifdef RT_USING_SMP
mov r0, r1
bl rt_cpus_lock_status_restore @ release the scheduler's per-cpu lock for the new thread
#ifdef RT_USING_SMART
bl rt_thread_self
bl lwp_user_setting_restore
#endif
#else
#ifdef RT_USING_SMART
bl rt_thread_self
mov r4, r0 @ keep thread across the aspace switch (r4 is callee-saved)
bl lwp_aspace_switch
mov r0, r4
bl lwp_user_setting_restore
#endif
#endif /*RT_USING_SMP*/
b rt_hw_context_switch_exit @ pop the thread's full register frame
.section .bss.share.isr
_guest_switch_lvl:
.word 0
.globl vmm_virq_update
.section .text.isr, "ax"
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
* r0 --> from (from_thread stack)
* r1 --> to (to_thread stack)
* r2 --> to_thread
*/
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
 * r0 = &from_thread->sp, r1 = &to_thread->sp, r2 = to_thread (SMP).
 * Builds a full exception-style frame on the current stack (cpsr,
 * r0-r12, lr, pc[, usr regs][, fpu]), saves sp into the from-TCB,
 * adopts the to-thread's sp and falls into the common restore path.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
clrex @ clear the local exclusive monitor
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01
orrne r4, r4, #0x20 @ it's thumb code: set T bit in the saved cpsr
stmfd sp!, {r4} @ push cpsr
#ifdef RT_USING_SMART
stmfd sp, {r13, r14}^ @ push usr_sp usr_lr (no writeback allowed with user-bank STM)
sub sp, #8
#endif
#ifdef RT_USING_FPU
/* fpu context: saved only when FPEXC.EN (bit 30) shows the FPU was in use */
vmrs r6, fpexc
tst r6, #(1<<30)
beq 1f
vstmdb sp!, {d0-d15}
vstmdb sp!, {d16-d31}
vmrs r5, fpscr
stmfd sp!, {r5}
1:
stmfd sp!, {r6}
#endif
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
#ifdef RT_USING_SMP
mov r0, r2
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
bl rt_thread_self
bl lwp_user_setting_restore
#endif
#else
#ifdef RT_USING_SMART
bl rt_thread_self
mov r4, r0 @ keep thread across the aspace switch (r4 is callee-saved)
bl lwp_aspace_switch
mov r0, r4
bl lwp_user_setting_restore
#endif
#endif /*RT_USING_SMP*/
b rt_hw_context_switch_exit @ pop the new thread's full register frame
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * Interrupt-context switch request.
 * SMP builds: performs the switch immediately (the IRQ path already
 * saved the full context; r0..r3 as documented below).
 * Non-SMP builds: only records from/to and sets the pending flag; the
 * IRQ epilogue (rt_hw_context_switch_interrupt_do) does the real work.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
clrex @ clear the local exclusive monitor
#ifdef RT_USING_SMP
/* r0 :svc_mod context
 * r1 :addr of from_thread's sp
 * r2 :addr of to_thread's sp
 * r3 :to_thread's tcb
 */
#ifdef RT_USING_SMART
push {r0 - r3, lr}
#ifdef RT_USING_SMART
/* NOTE(review): this inner RT_USING_SMART guard is redundant (already
 * inside an identical #ifdef) -- harmless, but could be removed. */
bl rt_thread_self
bl lwp_user_setting_save
#endif
pop {r0 - r3, lr}
#endif
str r0, [r1] @ from_thread->sp = saved svc context
ldr sp, [r2] @ adopt to_thread's stack
mov r0, r3
#ifdef RT_USING_SMART
mov r4, r0 @ keep tcb across the call (r4 is callee-saved)
#endif
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov r0, r4
bl lwp_user_setting_restore
#endif
b rt_hw_context_switch_exit
#else /*RT_USING_SMP*/
/* r0 :addr of from_thread's sp
 * r1 :addr of to_thread's sp
 * r2 :from_thread's tcb
 * r3 :to_thread's tcb
 */
#ifdef RT_USING_SMART
/* now to_thread(r3) not used */
ldr ip, =rt_thread_switch_interrupt_flag
ldr r3, [ip]
cmp r3, #1
beq _reswitch @ switch already pending: keep original "from"
ldr r3, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r3]
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [ip]
#ifdef RT_USING_SMART
push {r1, lr}
mov r0, r2
bl lwp_user_setting_save
pop {r1, lr}
#endif
_reswitch:
ldr ip, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [ip]
bx lr
#else
/* now from_thread(r2) to_thread(r3) not used */
ldr ip, =rt_thread_switch_interrupt_flag
ldr r3, [ip]
cmp r3, #1
beq _reswitch @ switch already pending: keep original "from"
ldr r3, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r3]
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [ip]
_reswitch:
ldr ip, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [ip]
bx lr
#endif
#endif /*RT_USING_SMP*/
/*
 * Common restore path: sp points at a full saved frame
 * ([fpexc[,fpscr,d31-d0]][,usr_sp,usr_lr,] cpsr, r0-r12, lr, pc)
 * laid out by rt_hw_context_switch / the IRQ save path. Pops it and
 * resumes the thread, via arch_ret_to_user if the frame's mode is USR.
 */
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
mov r0, sp
cps #Mode_IRQ
bl rt_signal_check @ may rewrite the frame to deliver a signal
cps #Mode_SVC
mov sp, r0
#endif
#endif
#ifdef RT_USING_FPU
/* fpu context: only present past fpexc when FPEXC.EN (bit 30) was set */
ldmfd sp!, {r6}
vmsr fpexc, r6
tst r6, #(1<<30)
beq 1f
ldmfd sp!, {r5}
vmsr fpscr, r5
vldmia sp!, {d16-d31}
vldmia sp!, {d0-d15}
1:
#endif
#ifdef RT_USING_SMART
ldmfd sp, {r13, r14}^ /* usr_sp, usr_lr (user-bank LDM: no writeback) */
add sp, #8
#endif
ldmfd sp!, {r1}
msr spsr_cxsf, r1 /* original mode */
#ifdef RT_USING_SMART
and r1, #0x1f
cmp r1, #0x10 /* 0x10 = Mode_USR: return through the user-return path */
bne 1f
ldmfd sp!, {r0-r12,lr}
ldmfd sp!, {lr}
b arch_ret_to_user
1:
#endif
ldmfd sp!, {r0-r12,lr,pc}^ /* irq return: ^ copies spsr to cpsr */
#ifdef RT_USING_FPU
/* void set_fpexc(rt_uint32_t val) -- raw write to the FPEXC register */
.global set_fpexc
set_fpexc:
vmsr fpexc, r0
bx lr
#endif
|
vandercookking/h7_device_RTT
| 16,692
|
rt-thread/libcpu/arm/cortex-a/start_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
* 2018-11-22 Jesven in the interrupt context, use rt_scheduler_do_irq_switch checks
* and switches to a new thread
*/
#include "rtconfig.h"
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 /* when I bit is set, IRQ is disabled */
.equ F_Bit, 0x40 /* when F bit is set, FIQ is disabled */
#ifdef RT_USING_SMART
.data
.align 14
init_mtbl:
.space 16*1024
#endif
.text
/* reset entry */
.globl _reset
_reset:
#ifdef ARCH_ARMV8
/* Check for HYP mode */
mrs r0, cpsr_all
and r0, r0, #0x1F
mov r8, #0x1A
cmp r0, r8
beq overHyped
b continue
overHyped: /* Get out of HYP mode */
adr r1, continue
msr ELR_hyp, r1
mrs r1, cpsr_all
and r1, r1, #0x1f /* CPSR_MODE_MASK */
orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */
msr SPSR_hyp, r1
eret
continue:
#endif
#ifdef SOC_BCM283x
/* Suspend the other cpu cores */
mrc p15, 0, r0, c0, c0, 5
ands r0, #3
bne _halt
/* Disable IRQ & FIQ */
cpsid if
/* Check for HYP mode */
mrs r0, cpsr_all
and r0, r0, #0x1F
mov r8, #0x1A
cmp r0, r8
beq overHyped
b continue
overHyped: /* Get out of HYP mode */
adr r1, continue
msr ELR_hyp, r1
mrs r1, cpsr_all
and r1, r1, #0x1f /* CPSR_MODE_MASK */
orr r1, r1, #0x13 /* CPSR_MODE_SUPERVISOR */
msr SPSR_hyp, r1
eret
continue:
/* set the cpu to SVC32 mode and disable interrupt */
mrs r0, cpsr
bic r0, r0, #0x1f
orr r0, r0, #0x13
msr cpsr_c, r0
#endif
/* invalid tlb before enable mmu */
mrc p15, 0, r0, c1, c0, 0
bic r0, #1
mcr p15, 0, r0, c1, c0, 0
dsb
isb
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
mcr p15, 0, r0, c7, c5, 0 /* iciallu */
mcr p15, 0, r0, c7, c5, 6 /* bpiall */
dsb
isb
#ifdef RT_USING_SMART
/* load r5 with PV_OFFSET */
ldr r7, =_reset
adr r5, _reset
sub r5, r5, r7
mov r7, #0x100000
sub r7, #1
mvn r8, r7
ldr r9, =KERNEL_VADDR_START
ldr r6, =__bss_end
add r6, r7
and r6, r8 /* r6 end vaddr align up to 1M */
sub r6, r9 /* r6 is size */
ldr sp, =svc_stack_n_limit
add sp, r5 /* use paddr */
ldr r0, =init_mtbl
add r0, r5
mov r1, r6
mov r2, r5
bl init_mm_setup
ldr lr, =after_enable_mmu
ldr r0, =init_mtbl
add r0, r5
b enable_mmu
after_enable_mmu:
#endif
#ifndef SOC_BCM283x
/* set the cpu to SVC32 mode and disable interrupt */
cps #Mode_SVC
#endif
#ifdef RT_USING_FPU
mov r4, #0xfffffff
mcr p15, 0, r4, c1, c0, 2
#endif
/* disable the data alignment check */
mrc p15, 0, r1, c1, c0, 0
bic r1, #(1<<1) /* Disable Alignment fault checking */
#ifndef RT_USING_SMART
bic r1, #(1<<0) /* Disable MMU */
bic r1, #(1<<2) /* Disable data cache */
bic r1, #(1<<11) /* Disable program flow prediction */
bic r1, #(1<<12) /* Disable instruction cache */
bic r1, #(3<<19) /* bit[20:19] must be zero */
#endif /* RT_USING_SMART */
mcr p15, 0, r1, c1, c0, 0
#ifndef RT_USING_SMART
#ifdef RT_USING_SMP
/* Use spin-table to start secondary cores */
@ get cpu id, and subtract the offset from the stacks base address
bl rt_hw_cpu_id
mov r5, r0
cmp r5, #0 @ cpu id == 0
beq normal_setup
@ cpu id > 0, stop or wait
#ifdef RT_SMP_AUTO_BOOT
ldr r0, =secondary_cpu_entry
mov r1, #0
str r1, [r0] /* clean secondary_cpu_entry */
#endif /* RT_SMP_AUTO_BOOT */
secondary_loop:
@ cpu core 1 goes into sleep until core 0 wakeup it
wfe
#ifdef RT_SMP_AUTO_BOOT
ldr r1, =secondary_cpu_entry
ldr r0, [r1]
cmp r0, #0
blxne r0 /* if(secondary_cpu_entry) secondary_cpu_entry(); */
#endif /* RT_SMP_AUTO_BOOT */
b secondary_loop
normal_setup:
#endif /* RT_USING_SMP */
#endif /* RT_USING_SMART */
/* enable I cache + branch prediction */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1<<12)
orr r0, r0, #(1<<11)
mcr p15, 0, r0, c1, c0, 0
/* setup stack */
bl stack_setup
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
mov r0, r5
bl rt_kmem_pvoff_set
#ifdef RT_USING_SMP
mrc p15, 0, r1, c1, c0, 1
mov r0, #(1<<6)
orr r1, r0
mcr p15, 0, r1, c1, c0, 1 /* enable smp */
#endif
/**
* void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
* initialize the mmu table and enable mmu
*/
ldr r0, =platform_mem_desc
ldr r1, =platform_mem_desc_size
ldr r1, [r1]
bl rt_hw_init_mmu_table
#ifdef RT_USING_SMART
ldr r0, =MMUTable /* vaddr */
add r0, r5 /* to paddr */
bl rt_hw_mmu_switch
#else
bl rt_hw_mmu_init
#endif
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
/* Returns the current CPU id: MPIDR affinity-0 field (low 4 bits).
 * Weak so a platform can override it. Clobbers only r0. */
.weak rt_asm_cpu_id
rt_asm_cpu_id:
mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */
and r0, r0, #0xf /* keep Aff0 = core number */
mov pc, lr
/* Assigns a per-mode, per-cpu stack: each mode's sp = base + (cpu_id+1)<<12,
 * i.e. the top of a 4 KiB slot per CPU in each stack array.
 * Clobbers r0, r1 (and r10 on SMP); must be called with bl. */
stack_setup:
#ifdef RT_USING_SMP
/* cpu id */
mov r10, lr /* rt_asm_cpu_id clobbers lr via bl, so save it */
bl rt_asm_cpu_id
mov lr, r10
add r0, r0, #1 /* +1 so the add below lands on the top of this cpu's slot */
#else
mov r0, #1
#endif
cps #Mode_UND
ldr r1, =und_stack_n
add sp, r1, r0, asl #12
cps #Mode_IRQ
ldr r1, =irq_stack_n
add sp, r1, r0, asl #12
cps #Mode_FIQ
ldr r1, =irq_stack_n /* NOTE(review): FIQ reuses irq_stack_n -- no dedicated FIQ stack; appears intentional, confirm */
add sp, r1, r0, asl #12
cps #Mode_ABT
ldr r1, =abt_stack_n
add sp, r1, r0, asl #12
cps #Mode_SVC
ldr r1, =svc_stack_n
add sp, r1, r0, asl #12
bx lr
#ifdef RT_USING_SMART
.align 2
.global enable_mmu
enable_mmu:
orr r0, #0x18
mcr p15, 0, r0, c2, c0, 0 /* ttbr0 */
mov r0, #(1 << 5) /* PD1=1 */
mcr p15, 0, r0, c2, c0, 2 /* ttbcr */
mov r0, #1
mcr p15, 0, r0, c3, c0, 0 /* dacr */
/* invalid tlb before enable mmu */
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
mcr p15, 0, r0, c7, c5, 0 /* iciallu */
mcr p15, 0, r0, c7, c5, 6 /* bpiall */
mrc p15, 0, r0, c1, c0, 0
orr r0, #((1 << 12) | (1 << 11)) /* instruction cache, branch prediction */
orr r0, #((1 << 2) | (1 << 0)) /* data cache, mmu enable */
mcr p15, 0, r0, c1, c0, 0
dsb
isb
mov pc, lr
/* Writes the process id into CONTEXTIDR.PROCID (bits [31:8]);
 * the ASID field (bits [7:0]) is left as zero by the shift. */
.global rt_hw_set_process_id
rt_hw_set_process_id:
LSL r0, r0, #8 /* move id into the PROCID field */
MCR p15, 0, r0, c13, c0, 1 /* CONTEXTIDR */
mov pc, lr
#endif
/* Switches the active translation table: writes r0 (table physical
 * address) into TTBR0 and invalidates TLB/I-cache/branch predictor
 * so no stale translations survive the switch. */
.global rt_hw_mmu_switch
rt_hw_mmu_switch:
orr r0, #0x18 // TTBR0 walk-attribute bits; NOTE(review): confirm 0x18 matches the kernel's cacheability config
mcr p15, 0, r0, c2, c0, 0 // ttbr0
//invalid tlb
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
mcr p15, 0, r0, c7, c5, 0 //iciallu
mcr p15, 0, r0, c7, c5, 6 //bpiall
dsb
isb
mov pc, lr
/* Returns the current translation table base: reads TTBR0 and strips
 * the attribute bits (0x18) that rt_hw_mmu_switch ORed in. */
.global rt_hw_mmu_tbl_get
rt_hw_mmu_tbl_get:
mrc p15, 0, r0, c2, c0, 0 /* ttbr0 */
bic r0, #0x18 /* drop walk-attribute bits, leave the base address */
mov pc, lr
_halt:
wfe
b _halt
#ifdef RT_USING_SMP
.global rt_secondary_cpu_entry
rt_secondary_cpu_entry:
#ifdef RT_USING_SMART
ldr r0, =_reset
adr r5, _reset
sub r5, r5, r0
ldr lr, =after_enable_mmu_n
ldr r0, =init_mtbl
add r0, r5
b enable_mmu
after_enable_mmu_n:
ldr r0, =MMUTable
add r0, r5
bl rt_hw_mmu_switch
#endif
#ifdef RT_USING_FPU
mov r4, #0xfffffff
mcr p15, 0, r4, c1, c0, 2
#endif
mrc p15, 0, r1, c1, c0, 1
mov r0, #(1<<6)
orr r1, r0
mcr p15, 0, r1, c1, c0, 1 /* enable smp */
mrc p15, 0, r0, c1, c0, 0
bic r0, #(1<<13)
mcr p15, 0, r0, c1, c0, 0
bl stack_setup
/* initialize the mmu table and enable mmu */
#ifndef RT_USING_SMART
bl rt_hw_mmu_init
#endif
b rt_hw_secondary_cpu_bsp_start
#endif
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.section .text.isr, "ax"
.align 5
.globl vector_fiq
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc, lr, #4
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check
.align 5
.globl vector_irq
vector_irq:
#ifdef RT_USING_SMP
stmfd sp!, {r0, r1}
cps #Mode_SVC
mov r0, sp /* svc_sp */
mov r1, lr /* svc_lr */
cps #Mode_IRQ
sub lr, #4
stmfd r0!, {r1, lr} /* svc_lr, svc_pc */
stmfd r0!, {r2 - r12}
ldmfd sp!, {r1, r2} /* original r0, r1 */
stmfd r0!, {r1 - r2}
mrs r1, spsr /* original mode */
stmfd r0!, {r1}
#ifdef RT_USING_SMART
stmfd r0, {r13, r14}^ /* usr_sp, usr_lr */
sub r0, #8
#endif
#ifdef RT_USING_FPU
/* fpu context */
vmrs r6, fpexc
tst r6, #(1<<30)
beq 1f
vstmdb r0!, {d0-d15}
vstmdb r0!, {d16-d31}
vmrs r5, fpscr
stmfd r0!, {r5}
1:
stmfd r0!, {r6}
#endif
/* now irq stack is clean */
/* r0 is task svc_sp */
/* backup r0 -> r8 */
mov r8, r0
cps #Mode_SVC
mov sp, r8
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
mov r0, r8
bl rt_scheduler_do_irq_switch
b rt_hw_context_switch_exit
#else
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
/* if rt_thread_switch_interrupt_flag set, jump to
* rt_hw_context_switch_interrupt_do and don't return */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
#ifdef RT_USING_SMART
ldmfd sp!, {r0-r12,lr}
cps #Mode_SVC
push {r0-r12}
mov r7, lr
cps #Mode_IRQ
mrs r4, spsr
sub r5, lr, #4
cps #Mode_SVC
and r6, r4, #0x1f
cmp r6, #0x10
bne 1f
msr spsr_csxf, r4
mov lr, r5
pop {r0-r12}
b arch_ret_to_user
1:
mov lr, r7
cps #Mode_IRQ
msr spsr_csxf, r4
mov lr, r5
cps #Mode_SVC
pop {r0-r12}
cps #Mode_IRQ
movs pc, lr
#else
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
#endif
rt_hw_context_switch_interrupt_do:
mov r1, #0 /* clear flag */
str r1, [r0]
mov r1, sp /* r1 point to {r0-r3} in stack */
add sp, sp, #4*4
ldmfd sp!, {r4-r12,lr} /* reload saved registers */
mrs r0, spsr /* get cpsr of interrupt thread */
sub r2, lr, #4 /* save old task's pc to r2 */
/* Switch to SVC mode with no interrupt. If the usr mode guest is
* interrupted, this will just switch to the stack of kernel space.
* save the registers in kernel space won't trigger data abort. */
msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
stmfd sp!, {r2} /* push old task's pc */
stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */
ldmfd r1, {r1-r4} /* restore r0-r3 of the interrupt thread */
stmfd sp!, {r1-r4} /* push old task's r0-r3 */
stmfd sp!, {r0} /* push old task's cpsr */
#ifdef RT_USING_SMART
stmfd sp, {r13, r14}^ /*push usr_sp, usr_lr */
sub sp, #8
#endif
#ifdef RT_USING_FPU
/* fpu context */
vmrs r6, fpexc
tst r6, #(1<<30)
beq 1f
vstmdb sp!, {d0-d15}
vstmdb sp!, {d16-d31}
vmrs r5, fpscr
stmfd sp!, {r5}
1:
stmfd sp!, {r6}
#endif
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] /* store sp in preempted tasks's TCB */
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] /* get new task's stack pointer */
#ifdef RT_USING_SMART
bl rt_thread_self
mov r4, r0
bl lwp_aspace_switch
mov r0, r4
bl lwp_user_setting_restore
#endif
#ifdef RT_USING_FPU
/* fpu context */
ldmfd sp!, {r6}
vmsr fpexc, r6
tst r6, #(1<<30)
beq 1f
ldmfd sp!, {r5}
vmsr fpscr, r5
vldmia sp!, {d16-d31}
vldmia sp!, {d0-d15}
1:
#endif
#ifdef RT_USING_SMART
ldmfd sp, {r13, r14}^ /*pop usr_sp, usr_lr */
add sp, #8
#endif
ldmfd sp!, {r4} /* pop new task's cpsr to spsr */
msr spsr_cxsf, r4
#ifdef RT_USING_SMART
and r4, #0x1f
cmp r4, #0x10
bne 1f
ldmfd sp!, {r0-r12,lr}
ldmfd sp!, {lr}
b arch_ret_to_user
1:
#endif
/* pop new task's r0-r12,lr & pc, copy spsr to cpsr */
ldmfd sp!, {r0-r12,lr,pc}^
#endif
.macro push_svc_reg
sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */
stmia sp, {r0 - r12} /* Calling r0-r12 */
mov r0, sp
add sp, sp, #17 * 4
mrs r6, spsr /* Save CPSR */
str lr, [r0, #15*4] /* Push PC */
str r6, [r0, #16*4] /* Push CPSR */
and r1, r6, #0x1f
cmp r1, #0x10
cps #Mode_SYS
streq sp, [r0, #13*4] /* Save calling SP */
streq lr, [r0, #14*4] /* Save calling PC */
cps #Mode_SVC
strne sp, [r0, #13*4] /* Save calling SP */
strne lr, [r0, #14*4] /* Save calling PC */
.endm
.align 5
.weak vector_swi
vector_swi:
push_svc_reg
bl rt_hw_trap_swi
b .
.align 5
.globl vector_undef
vector_undef:
push_svc_reg
bl rt_hw_trap_undef
#ifdef RT_USING_FPU
cps #Mode_UND
sub sp, sp, #17 * 4
ldr lr, [sp, #15*4]
ldmia sp, {r0 - r12}
add sp, sp, #17 * 4
movs pc, lr
#endif
b .
.align 5
.globl vector_pabt
vector_pabt:
push_svc_reg
#ifdef RT_USING_SMART
/* cp Mode_ABT stack to SVC */
sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */
mov lr, r0
ldmia lr, {r0 - r12}
stmia sp, {r0 - r12}
add r1, lr, #13 * 4
add r2, sp, #13 * 4
ldmia r1, {r4 - r7}
stmia r2, {r4 - r7}
mov r0, sp
bl rt_hw_trap_pabt
/* return to user */
ldr lr, [sp, #16*4] /* orign spsr */
msr spsr_cxsf, lr
ldr lr, [sp, #15*4] /* orign pc */
ldmia sp, {r0 - r12}
add sp, #17 * 4
b arch_ret_to_user
#else
bl rt_hw_trap_pabt
b .
#endif
.align 5
.globl vector_dabt
vector_dabt:
push_svc_reg
#ifdef RT_USING_SMART
/* cp Mode_ABT stack to SVC */
sub sp, sp, #17 * 4 /* Sizeof(struct rt_hw_exp_stack) */
mov lr, r0
ldmia lr, {r0 - r12}
stmia sp, {r0 - r12}
add r1, lr, #13 * 4
add r2, sp, #13 * 4
ldmia r1, {r4 - r7}
stmia r2, {r4 - r7}
mov r0, sp
bl rt_hw_trap_dabt
/* return to user */
ldr lr, [sp, #16*4] /* orign spsr */
msr spsr_cxsf, lr
ldr lr, [sp, #15*4] /* orign pc */
ldmia sp, {r0 - r12}
add sp, #17 * 4
b arch_ret_to_user
#else
bl rt_hw_trap_dabt
b .
#endif
.align 5
.globl vector_resv
vector_resv:
push_svc_reg
bl rt_hw_trap_resv
b .
/* rt_uint32_t rt_hw_clz(rt_uint32_t v);
 * Count leading zeros of r0 (clz returns 32 for input 0). */
.global rt_hw_clz
rt_hw_clz:
clz r0, r0
bx lr
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif
#include "asm-generic.h"
START_POINT(_thread_start)
mov r10, lr
blx r1
blx r10
b . /* never here */
START_POINT_END(_thread_start)
.bss
.align 3 /* align to 2~3=8 */
svc_stack_n:
.space (RT_CPUS_NR << 12)
svc_stack_n_limit:
irq_stack_n:
.space (RT_CPUS_NR << 12)
und_stack_n:
.space (RT_CPUS_NR << 12)
abt_stack_n:
.space (RT_CPUS_NR << 12)
|
vandercookking/h7_device_RTT
| 3,181
|
rt-thread/libcpu/arm/cortex-a/cp15_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
/* Returns the raw MPIDR (multiprocessor affinity register) in r0;
 * the caller masks out the affinity field it needs. */
.globl rt_cpu_get_smp_id
rt_cpu_get_smp_id:
mrc p15, #0, r0, c0, c0, #5 @ read MPIDR
bx lr
/* Installs a custom exception-vector base: clears SCTLR.V (high-vector
 * bit 13) so VBAR is honored, then writes r0 into VBAR. */
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
/* clear SCTRL.V to customize the vector address */
mrc p15, #0, r1, c1, c0, #0
bic r1, #(1 << 13) @ SCTLR.V = 0: vectors at VBAR, not 0xFFFF0000
mcr p15, #0, r1, c1, c0, #0
/* set up the vector address */
mcr p15, #0, r0, c12, c0, #0 @ VBAR = r0
dsb
bx lr
/* Enables the data cache: sets SCTLR.C (bit 2). */
.globl rt_hw_cpu_dcache_enable
rt_hw_cpu_dcache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00000004 @ SCTLR.C
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Enables the instruction cache: sets SCTLR.I (bit 12). */
.globl rt_hw_cpu_icache_enable
rt_hw_cpu_icache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00001000 @ SCTLR.I
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Field-extraction masks for the CCSIDR way/set fields. */
_FLD_MAX_WAY:
.word 0x3ff
_FLD_MAX_IDX:
.word 0x7fff
/*
 * Clean+invalidate the entire data cache by set/way, walking every
 * cache level reported by CLIDR up to the level of coherency
 * (the standard ARMv7-A set/way maintenance loop).
 * Clobbers r0-r3 and flags; r4-r11 are saved/restored.
 */
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
push {r4-r11}
dmb
mrc p15, #1, r0, c0, c0, #1 @ read clid register
ands r3, r0, #0x7000000 @ get level of coherency
mov r3, r3, lsr #23 @ r3 = LoC * 2 (loop bound in level*2 units)
beq finished
mov r10, #0 @ r10 = current cache level * 2
loop1:
add r2, r10, r10, lsr #1 @ r2 = 3*level: CLIDR ctype field offset
mov r1, r0, lsr r2
and r1, r1, #7 @ r1 = cache type at this level
cmp r1, #2
blt skip @ no data/unified cache at this level
mcr p15, #2, r10, c0, c0, #0 @ select level in CSSELR
isb
mrc p15, #1, r1, c0, c0, #0 @ read CCSIDR for the selected level
and r2, r1, #7
add r2, r2, #4 @ r2 = log2(line size in bytes)
ldr r4, _FLD_MAX_WAY
ands r4, r4, r1, lsr #3 @ r4 = max way number
clz r5, r5 == 0 ? 0 : 0 @ (unreachable placeholder)
/* Invalidates the entire instruction cache and branch predictor
 * (ICIALLU), then synchronizes. */
.globl rt_cpu_icache_flush
rt_cpu_icache_flush:
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
dsb
isb
bx lr
/* Disables the data cache: cleans+invalidates it first so no dirty
 * lines are lost, then clears SCTLR.C (bit 2). */
.globl rt_hw_cpu_dcache_disable
rt_hw_cpu_dcache_disable:
push {r4-r11, lr}
bl rt_cpu_dcache_clean_flush @ write back dirty lines before turning cache off
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00000004 @ clear SCTLR.C
mcr p15, #0, r0, c1, c0, #0
pop {r4-r11, lr}
bx lr
/* Disables the instruction cache: clears SCTLR.I (bit 12). */
.globl rt_hw_cpu_icache_disable
rt_hw_cpu_icache_disable:
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00001000 @ clear SCTLR.I
mcr p15, #0, r0, c1, c0, #0
bx lr
/* Disables the MMU: invalidates the TLB, then clears SCTLR.M (bit 0). */
.globl rt_cpu_mmu_disable
rt_cpu_mmu_disable:
mcr p15, #0, r0, c8, c7, #0 @ invalidate tlb
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #1
mcr p15, #0, r0, c1, c0, #0 @ clear mmu bit
dsb
bx lr
/* Enables the MMU: sets SCTLR.M (bit 0). The translation table must
 * already be installed via rt_cpu_tlb_set / TTBR0. */
.globl rt_cpu_mmu_enable
rt_cpu_mmu_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x001
mcr p15, #0, r0, c1, c0, #0 @ set mmu enable bit
dsb
bx lr
/* Installs a translation table base: writes r0 into TTBR0. */
.globl rt_cpu_tlb_set
rt_cpu_tlb_set:
mcr p15, #0, r0, c2, c0, #0 @ TTBR0 = r0
dmb
bx lr
|
vandercookking/h7_device_RTT
| 10,121
|
rt-thread/libcpu/avr32/uc3/exception_gcc.S
|
/* This file is part of the ATMEL AVR32-UC3-SoftwareFramework-1.6.0 Release */
/*This file is prepared for Doxygen automatic documentation generation.*/
/*! \file *********************************************************************
*
* \brief Exception and interrupt vectors.
*
* This file maps all events supported by an AVR32.
*
* - Compiler: GNU GCC for AVR32
* - Supported devices: All AVR32 devices with an INTC module can be used.
* - AppNote:
*
* \author Atmel Corporation: http://www.atmel.com \n
* Support and FAQ: http://support.atmel.no/
*
******************************************************************************/
/* Copyright (c) 2009 Atmel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. The name of Atmel may not be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* 4. This software may only be redistributed and used in connection with an Atmel
* AVR product.
*
* THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
*
*/
#if !__AVR32_UC__ && !__AVR32_AP__
#error Implementation of the AVR32 architecture not supported by the INTC driver.
#endif
#include <avr32/io.h>
//! @{
//! \verbatim
.section .exception, "ax", @progbits
// Start of Exception Vector Table.
// EVBA must be aligned with a power of two strictly greater than the EVBA-
// relative offset of the last vector.
.balign 0x200
// Export symbol.
.global _evba
.type _evba, @function
_evba:
// Each CPU exception below is a placeholder handler: "rjmp $" spins at the
// vector address so a debugger halts exactly on the faulting event.  The
// .org directives pin every vector at its architecturally fixed offset
// relative to EVBA.
.org 0x000
// Unrecoverable Exception.
_handle_Unrecoverable_Exception:
rjmp $
.org 0x004
// TLB Multiple Hit.
_handle_TLB_Multiple_Hit:
rjmp $
.org 0x008
// Bus Error Data Fetch.
_handle_Bus_Error_Data_Fetch:
rjmp $
.org 0x00C
// Bus Error Instruction Fetch.
_handle_Bus_Error_Instruction_Fetch:
rjmp $
.org 0x010
// NMI.
_handle_NMI:
rjmp $
.org 0x014
// Instruction Address.
_handle_Instruction_Address:
rjmp $
.org 0x018
// ITLB Protection.
_handle_ITLB_Protection:
rjmp $
.org 0x01C
// Breakpoint.
_handle_Breakpoint:
rjmp $
.org 0x020
// Illegal Opcode.
_handle_Illegal_Opcode:
rjmp $
.org 0x024
// Unimplemented Instruction.
_handle_Unimplemented_Instruction:
rjmp $
.org 0x028
// Privilege Violation.
_handle_Privilege_Violation:
rjmp $
.org 0x02C
// Floating-Point: UNUSED IN AVR32UC and AVR32AP.
_handle_Floating_Point:
rjmp $
.org 0x030
// Coprocessor Absent: UNUSED IN AVR32UC.
_handle_Coprocessor_Absent:
rjmp $
.org 0x034
// Data Address (Read).
_handle_Data_Address_Read:
rjmp $
.org 0x038
// Data Address (Write).
_handle_Data_Address_Write:
rjmp $
.org 0x03C
// DTLB Protection (Read).
_handle_DTLB_Protection_Read:
rjmp $
.org 0x040
// DTLB Protection (Write).
_handle_DTLB_Protection_Write:
rjmp $
.org 0x044
// DTLB Modified: UNUSED IN AVR32UC.
_handle_DTLB_Modified:
rjmp $
.org 0x050
// ITLB Miss.
_handle_ITLB_Miss:
rjmp $
.org 0x060
// DTLB Miss (Read).
_handle_DTLB_Miss_Read:
rjmp $
.org 0x070
// DTLB Miss (Write).
_handle_DTLB_Miss_Write:
rjmp $
.org 0x100
// Supervisor Call.
_handle_Supervisor_Call:
rjmp $
// Interrupt support.
// The interrupt controller must provide the offset address relative to EVBA.
// Important note:
// All interrupts call a C function named _get_interrupt_handler.
// This function will read group and interrupt line number to then return in
// R12 a pointer to a user-provided interrupt handler.
.balign 4
// Common pattern shared by the four interrupt-level entry points _int0.._int3:
//   1. _get_interrupt_handler(level) returns the user ISR pointer in R12
//      (NULL for a spurious interrupt).
//   2. The ISR runs bracketed by rt_interrupt_enter/rt_interrupt_leave.
//   3. With global interrupts masked, if this is the outermost interrupt
//      (rt_interrupt_nest == 0) and the scheduler requested a switch
//      (rt_thread_switch_interrupt_flag == 1), branch to
//      rt_hw_context_switch_interrupt_do with R12 still holding the address
//      of rt_thread_switch_interrupt_flag (that routine relies on it).
_int0:
mov r12, 0 // int_level argument for _get_interrupt_handler.
call _get_interrupt_handler
cp.w r12, 0 // R12 = user ISR pointer, or NULL if spurious.
breq _spint0 // NULL -> spurious interrupt: skip straight to RETE.
call rt_interrupt_enter
icall r12
call rt_interrupt_leave
ssrf AVR32_SR_GM_OFFSET /* Disable global interrupt */
lda.w r12, rt_interrupt_nest /* Is nested interrupt? */
ld.w r11, r12[0]
cp.w r11, 0
brne _spint0 /* still nested: never switch here */
lda.w r12, rt_thread_switch_interrupt_flag /* Is thread switch required? */
ld.w r11, r12[0]
cp.w r11, 1
breq rt_hw_context_switch_interrupt_do
_spint0:
csrf AVR32_SR_GM_OFFSET /* Enable global interrupt */
rete // Return from event handler (hardware pops R8-R12, LR, PC, SR).
_int1:
mov r12, 1 // int_level argument for _get_interrupt_handler.
call _get_interrupt_handler
cp.w r12, 0 // R12 = user ISR pointer, or NULL if spurious.
breq _spint1 // NULL -> spurious interrupt: skip straight to RETE.
call rt_interrupt_enter
icall r12
call rt_interrupt_leave
ssrf AVR32_SR_GM_OFFSET /* Disable global interrupt */
lda.w r12, rt_interrupt_nest /* Is nested interrupt? */
ld.w r11, r12[0]
cp.w r11, 0
brne _spint1 /* still nested: never switch here */
lda.w r12, rt_thread_switch_interrupt_flag /* Is thread switch required? */
ld.w r11, r12[0]
cp.w r11, 1
breq rt_hw_context_switch_interrupt_do
_spint1:
csrf AVR32_SR_GM_OFFSET /* Enable global interrupt */
rete // Return from event handler.
_int2:
mov r12, 2 // int_level argument for _get_interrupt_handler.
call _get_interrupt_handler
cp.w r12, 0 // R12 = user ISR pointer, or NULL if spurious.
breq _spint2 // NULL -> spurious interrupt: skip straight to RETE.
call rt_interrupt_enter
icall r12
call rt_interrupt_leave
ssrf AVR32_SR_GM_OFFSET /* Disable global interrupt */
lda.w r12, rt_interrupt_nest /* Is nested interrupt? */
ld.w r11, r12[0]
cp.w r11, 0
brne _spint2 /* still nested: never switch here */
lda.w r12, rt_thread_switch_interrupt_flag /* Is thread switch required? */
ld.w r11, r12[0]
cp.w r11, 1
breq rt_hw_context_switch_interrupt_do
_spint2:
csrf AVR32_SR_GM_OFFSET /* Enable global interrupt */
rete // Return from event handler.
_int3:
mov r12, 3 // int_level argument for _get_interrupt_handler.
call _get_interrupt_handler
cp.w r12, 0 // R12 = user ISR pointer, or NULL if spurious.
breq _spint3 // NULL -> spurious interrupt: skip straight to RETE.
call rt_interrupt_enter
icall r12
call rt_interrupt_leave
ssrf AVR32_SR_GM_OFFSET /* Disable global interrupt */
lda.w r12, rt_interrupt_nest /* Is nested interrupt? */
ld.w r11, r12[0]
cp.w r11, 0
brne _spint3 /* still nested: never switch here */
lda.w r12, rt_thread_switch_interrupt_flag /* Is thread switch required? */
ld.w r11, r12[0]
cp.w r11, 1
breq rt_hw_context_switch_interrupt_do
_spint3:
csrf AVR32_SR_GM_OFFSET /* Enable global interrupt */
rete // Return from event handler.
// Finish an interrupt-triggered context switch.  Entered from _int0.._int3
// with global interrupts masked and R12 = &rt_thread_switch_interrupt_flag.
// The interrupt entry already stacked R8-R12, LR, PC, SR (the RETE frame);
// only R0-R7 remain to be pushed to complete the thread context.
rt_hw_context_switch_interrupt_do:
mov r11, 0
st.w r12[0], r11 /* Clear rt_thread_switch_interrupt_flag */
stm --sp, r0-r7 /* Push R0-R7 */
lda.w r12, rt_interrupt_from_thread /* Get old thread SP */
ld.w r12, r12[0]
lda.w r11, rt_interrupt_to_thread /* Get new thread SP */
ld.w r11, r11[0]
st.w r12[0], sp /* Store old thread SP */
ld.w sp, r11[0] /* Load new thread SP */
ldm sp++, r0-r7 /* Pop R0-R7 (new thread) */
rete /* RETE pops R8-R12, LR, PC, SR automatically */
// Constant data area.
.balign 4
// Values to store in the interrupt priority registers for the various interrupt priority levels.
// The interrupt priority registers contain the interrupt priority level and
// the EVBA-relative interrupt vector offset.
.global ipr_val
.type ipr_val, @object
// One 32-bit word per level N: (priority << INTLEVEL_OFFSET) | (_intN - _evba).
// The INTC driver copies these words into the IPR registers at init time.
ipr_val:
.word (AVR32_INTC_INT0 << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (_int0 - _evba),\
(AVR32_INTC_INT1 << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (_int1 - _evba),\
(AVR32_INTC_INT2 << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (_int2 - _evba),\
(AVR32_INTC_INT3 << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (_int3 - _evba)
//! \endverbatim
//! @}
|
vandercookking/h7_device_RTT
| 2,732
|
rt-thread/libcpu/avr32/uc3/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-03-27 Kyle First version
*/
#define AVR32_SR 0
#define AVR32_SR_GM_OFFSET 16
.text
/*
 * rt_base_t rt_hw_interrupt_disable()
 * Masks global interrupts by setting SR.GM.
 * NOTE(review): the previous SR value is never captured into R12, so the
 * "level" seen by C callers is undefined; the matching enable below also
 * ignores its argument.  Confirm callers only use these as a simple
 * disable/enable pair.
 */
.globl rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
ssrf AVR32_SR_GM_OFFSET
mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Unmasks global interrupts by clearing SR.GM; "level" is ignored.
 */
.globl rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
csrf AVR32_SR_GM_OFFSET
mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * AVR32 EABI passes the first argument in r12 and the second in r11:
 * r12 --> from (address of the outgoing thread's saved-SP slot)
 * r11 --> to   (address of the incoming thread's saved-SP slot)
 * Builds a stack frame identical to the RETE frame used by interrupt
 * entry so both switch paths share one restore layout.
 */
.globl rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch:
ssrf AVR32_SR_GM_OFFSET /* Disable global interrupt */
stm --sp, r8-r12, lr /* Push R8-R12, LR */
st.w --sp, lr /* Push LR in place of PC: resume right after this call */
mfsr r8, AVR32_SR /* Read Status Register */
cbr r8, AVR32_SR_GM_OFFSET /* Clear GM so interrupts re-enable on resume */
st.w --sp, r8 /* Push SR */
stm --sp, r0-r7 /* Push R0-R7 */
/* Stack layout (top to bottom): R0-R7, SR, PC, LR, R8-R12 */
st.w r12[0], sp /* Store SP in preempted tasks TCB */
ld.w sp, r11[0] /* Get new task stack pointer */
ldm sp++, r0-r7 /* pop R0-R7 */
ld.w r8, sp++ /* pop SR */
mtsr AVR32_SR, r8 /* Restore SR */
ldm sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume to thread */
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * r12 --> to (address of the thread's saved-SP slot; AVR32 EABI arg 1)
 * First-ever switch into a thread: nothing is saved, we simply unstack
 * the prepared context of the "to" thread and jump into it.
 */
.globl rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
ld.w sp, r12[0] /* Get new task stack pointer */
ldm sp++, r0-r7 /* pop R0-R7 */
ld.w r8, sp++ /* pop SR */
mtsr AVR32_SR, r8 /* Restore SR */
ldm sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume execution */
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * r12 --> from, r11 --> to (AVR32 EABI argument registers)
 * Called while servicing an interrupt: only records the request.  The
 * actual switch happens in rt_hw_context_switch_interrupt_do on interrupt
 * exit.  If a switch is already pending, only the destination is updated
 * so the original "from" thread remains the one whose context is saved.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
rt_hw_context_switch_interrupt:
lda.w r8, rt_thread_switch_interrupt_flag
ld.w r9, r8[0]
cp.w r9, 1
breq _reswitch /* a switch is already pending: keep original "from" */
mov r9, 1
st.w r8[0], r9 /* mark a switch as pending */
lda.w r8, rt_interrupt_from_thread
st.w r8[0], r12 /* record outgoing thread's SP slot */
_reswitch:
lda.w r8, rt_interrupt_to_thread
st.w r8[0], r11 /* record (latest) incoming thread's SP slot */
mov pc, lr
|
vandercookking/h7_device_RTT
| 4,120
|
rt-thread/libcpu/blackfin/bf53x/context_vdsp.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-02-13 mojingxian First version
*/
.global _rt_hw_interrupt_disable;
.global _rt_hw_interrupt_enable;
.global _interrupt_thread_switch;
.extern _rt_interrupt_from_thread;
.extern _rt_interrupt_to_thread;
.extern _rt_thread_switch_interrupt_flag;
.section/DOUBLE64 program;
/*
 * rt_base_t rt_hw_interrupt_disable();
 * return value in R0: CLI copies the current interrupt mask into R0 and
 * disables interrupts in one instruction.
 */
_rt_hw_interrupt_disable:
CLI R0;
_rt_hw_interrupt_disable.end:
NOP; /* NOPs retained after CLI -- presumably pipeline padding; verify against the Blackfin HRM */
NOP;
NOP;
RTS;
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * R0->level: STI restores the interrupt mask previously returned by CLI.
 */
_rt_hw_interrupt_enable:
STI R0;
_rt_hw_interrupt_enable.end:
NOP;
NOP;
NOP;
RTS;
/* Perform a thread switch on interrupt exit.  Saves the interrupted
 * thread's complete CPU context on its own stack, publishes that SP via
 * *rt_interrupt_from_thread, clears the switch-request flag, then adopts
 * the stack of *rt_interrupt_to_thread, restores its context and RTIs
 * into the new thread. */
_interrupt_thread_switch:
/* Save context, interrupts disabled by IPEND[4] bit */
[ -- SP ] = R0;
[ -- SP ] = P1;
[ -- SP ] = RETS;
[ -- SP ] = R1;
[ -- SP ] = R2;
[ -- SP ] = P0;
[ -- SP ] = P2;
[ -- SP ] = ASTAT;
R1 = RETI; /* IPEND[4] is currently set, globally disabling interrupts */
/* IPEND[4] will stay set when RETI is saved through R1 */
[ -- SP ] = R1;
[ -- SP ] = (R7:3, P5:3);
[ -- SP ] = FP;
[ -- SP ] = I0;
[ -- SP ] = I1;
[ -- SP ] = I2;
[ -- SP ] = I3;
[ -- SP ] = B0;
[ -- SP ] = B1;
[ -- SP ] = B2;
[ -- SP ] = B3;
[ -- SP ] = L0;
[ -- SP ] = L1;
[ -- SP ] = L2;
[ -- SP ] = L3;
[ -- SP ] = M0;
[ -- SP ] = M1;
[ -- SP ] = M2;
[ -- SP ] = M3;
/* Accumulators A0/A1 are 40-bit: save the 8-bit extension (.x) and the
   32-bit word (.w) as two separate stack slots each. */
R1.L = A0.x;
[ -- SP ] = R1;
R1 = A0.w;
[ -- SP ] = R1;
R1.L = A1.x;
[ -- SP ] = R1;
R1 = A1.w;
[ -- SP ] = R1;
/* Save the hardware-loop registers, then zero LC0/LC1 so no hardware
   loop remains active while running the switch/restore code below. */
[ -- SP ] = LC0;
R3 = 0;
LC0 = R3;
[ -- SP ] = LC1;
R3 = 0;
LC1 = R3;
[ -- SP ] = LT0;
[ -- SP ] = LT1;
[ -- SP ] = LB0;
[ -- SP ] = LB1;
/* Context save done so save SP in the TCB */
P1.h = _rt_interrupt_from_thread;
P1.l = _rt_interrupt_from_thread;
P2 = [ P1 ];
[ P2 ] = SP;
/* clear rt_thread_switch_interrupt_flag to 0 */
P1.h = _rt_thread_switch_interrupt_flag;
P1.l = _rt_thread_switch_interrupt_flag;
R0 = 0;
[ P1 ] = R0;
/* Get a pointer to the high ready task's TCB */
P1.h = _rt_interrupt_to_thread;
P1.l = _rt_interrupt_to_thread;
P2 = [ P1 ];
SP = [ P2 ];
/* Restoring CPU context and return to task (exact reverse of the save
   order above) */
LB1 = [ SP ++ ];
LB0 = [ SP ++ ];
LT1 = [ SP ++ ];
LT0 = [ SP ++ ];
LC1 = [ SP ++ ];
LC0 = [ SP ++ ];
R0 = [ SP ++ ];
A1 = R0;
R0 = [ SP ++ ];
A1.x = R0.L;
R0 = [ SP ++ ];
A0 = R0;
R0 = [ SP ++ ];
A0.x = R0.L;
M3 = [ SP ++ ];
M2 = [ SP ++ ];
M1 = [ SP ++ ];
M0 = [ SP ++ ];
L3 = [ SP ++ ];
L2 = [ SP ++ ];
L1 = [ SP ++ ];
L0 = [ SP ++ ];
B3 = [ SP ++ ];
B2 = [ SP ++ ];
B1 = [ SP ++ ];
B0 = [ SP ++ ];
I3 = [ SP ++ ];
I2 = [ SP ++ ];
I1 = [ SP ++ ];
I0 = [ SP ++ ];
FP = [ SP ++ ];
(R7:3, P5:3) = [ SP ++ ];
RETI = [ SP ++ ]; /* IPEND[4] will stay set when RETI popped from stack */
ASTAT = [ SP ++ ];
P2 = [ SP ++ ];
P0 = [ SP ++ ];
R2 = [ SP ++ ];
R1 = [ SP ++ ];
RETS = [ SP ++ ];
P1 = [ SP ++ ];
R0 = [ SP ++ ];
_interrupt_thread_switch.end:
RTI;
|
vandercookking/h7_device_RTT
| 4,021
|
rt-thread/libcpu/v850/70f34/context_iar.S
|
#include "macdefs.inc"
name OS_Core
COMMON INTVEC:CODE
;********************************************************************
;
; function:
; description: Trap 0x10 vector used for context switch
; Right now, all TRAPs to $1x are trated the same way
;
org 50h
jr OSCtxSW ;TRAP 0x10 vector: software-requested context switch
;********************************************************************
;
; function:
; description: Timer 40 compare match interrupt used for system
; tick interrupt
;
org 0x220
jr OSTickIntr ;system tick interrupt vector
org 0x0520
jr uarta1_int_r ;UARTA1 receive interrupt vector
RSEG CODE(1)
EXTERN rt_thread_switch_interrupt_flag
EXTERN rt_interrupt_from_thread
EXTERN rt_interrupt_to_thread
EXTERN rt_interrupt_enter
EXTERN rt_interrupt_leave
EXTERN rt_tick_increase
EXTERN uarta1_receive_handler
PUBLIC rt_hw_interrupt_disable
PUBLIC rt_hw_interrupt_enable
PUBLIC rt_hw_context_switch_to
PUBLIC OSCtxSW
PUBLIC OS_Restore_CPU_Context
;rt_base_t rt_hw_interrupt_disable(): returns the previous PSW in r1,
;then masks maskable interrupts with DI (sets PSW.ID).
rt_hw_interrupt_disable:
stsr psw, r1
di
jmp [lp]
;void rt_hw_interrupt_enable(rt_base_t level): restores the PSW value
;(in r1) previously captured by rt_hw_interrupt_disable.
rt_hw_interrupt_enable:
ldsr r1, psw
jmp [lp]
;Restore the full CPU context from the stack frame built by SAVE_CPU_CTX
;and return into the thread with RETI.  ep is used as the frame base for
;the short-form sld.w loads.  ECR tells us whether the frame was created
;by a TRAP (then PSW.EP must be set) or by an interrupt (PSW.EP clear)
;before EIPC/EIPSW are reloaded for the RETI.
OS_Restore_CPU_Context:
mov sp, ep
sld.w 4[ep], r2
sld.w 8[ep], r5
sld.w 12[ep],r6
sld.w 16[ep],r7
sld.w 20[ep],r8
sld.w 24[ep],r9
sld.w 28[ep],r10
sld.w 32[ep],r11
sld.w 36[ep],r12
sld.w 40[ep],r13
sld.w 44[ep],r14
sld.w 48[ep],r15
sld.w 52[ep],r16
;See what was the latest interruption (trap or interrupt)
stsr ecr, r17 ;Move ecr to r17
mov 0x050,r1 ;0x50 = exception code of TRAP 0x10
cmp r1, r17 ;If latest break was due to TRAP, set EP
be _SetEP
_ClrEP:
mov 0x20, r17 ;Set only ID
ldsr r17, psw
;Restore caller address
sld.w 56[ep], r1
ldsr r1, EIPC
;Restore PSW (ID bit forced clear so interrupts re-enable on RETI)
sld.w 60[ep], r1
andi 0xffdf,r1,r1
ldsr r1, EIPSW
sld.w 0[ep], r1
dispose (8+(4*14)),{r23,r24,r25,r26,r27,r28,r29,r30,r31}
;Return from interrupt starts new task!
reti
_SetEP:
mov 0x60, r17 ;Set both EP and ID bits
ldsr r17, psw
;Restore caller address
sld.w 56[ep], r1
ldsr r1, EIPC
;Restore PSW (ID bit forced clear so interrupts re-enable on RETI)
sld.w 60[ep], r1
andi 0xffdf,r1,r1
ldsr r1, EIPSW
sld.w 0[ep], r1
dispose (8+(4*14)),{r23,r24,r25,r26,r27,r28,r29,r30,r31}
;Return from interrupt starts new task!
reti
//rseg CODE:CODE
//public rt_hw_context_switch_to
;void rt_hw_context_switch_to(to): first-ever switch into a thread.
;r1 = &to_thread->sp (IAR V850 first argument register).  Nothing is
;saved; we adopt the thread's prepared frame and "return" into it.
rt_hw_context_switch_to:
;Load stack pointer of the task to run
ld.w 0[r1], sp ;load sp from struct
;Restore all Processor registers from stack and return from interrupt
jr OS_Restore_CPU_Context
;TRAP 0x10 handler: voluntary context switch.  Saves the full context of
;the current thread, stores SP into *rt_interrupt_from_thread, loads SP
;from *rt_interrupt_to_thread and restores the new thread.
OSCtxSW:
SAVE_CPU_CTX ;Save all CPU registers
mov rt_interrupt_from_thread, r21
ld.w 0[r21], r21
st.w sp, 0[r21] ;from_thread->sp = sp
mov rt_interrupt_to_thread, r1
ld.w 0[r1], r1
ld.w 0[r1], sp ;sp = to_thread->sp
;Restore all Processor registers from stack and return from interrupt
jr OS_Restore_CPU_Context
;Tail of an interrupt-requested switch: the ISR has already run and the
;context is already saved by SAVE_CPU_CTX in the ISR prologue; clear the
;pending flag, swap stacks and restore the new thread.
rt_hw_context_switch_interrupt_do:
mov rt_thread_switch_interrupt_flag, r8
mov 0, r9
st.b r9, 0[r8] ;rt_thread_switch_interrupt_flag = 0
mov rt_interrupt_from_thread, r21
ld.w 0[r21], r21
st.w sp, 0[r21] ;from_thread->sp = sp
mov rt_interrupt_to_thread, r1
ld.w 0[r1], r1
ld.w 0[r1], sp ;sp = to_thread->sp
jr OS_Restore_CPU_Context
;System tick ISR: advances the RT-Thread tick, then performs a context
;switch on exit if the tick handler requested one.
OSTickIntr:
SAVE_CPU_CTX ;Save current task's registers
jarl rt_interrupt_enter,lp
jarl rt_tick_increase,lp
jarl rt_interrupt_leave,lp
mov rt_thread_switch_interrupt_flag, r8
ld.w 0[r8],r9
cmp 1, r9 ;switch requested during the ISR?
be rt_hw_context_switch_interrupt_do
jr OS_Restore_CPU_Context
;UARTA1 receive ISR: same pattern with the UART handler in the middle.
uarta1_int_r:
SAVE_CPU_CTX ;Save current task's registers
jarl rt_interrupt_enter,lp
jarl uarta1_receive_handler,lp
jarl rt_interrupt_leave,lp
mov rt_thread_switch_interrupt_flag, r8
ld.w 0[r8],r9
cmp 1, r9 ;switch requested during the ISR?
be rt_hw_context_switch_interrupt_do
jr OS_Restore_CPU_Context
END
|
vandercookking/h7_device_RTT
| 1,394
|
rt-thread/libcpu/m16c/m16c62p/context_iar.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-04-09 fify the first version
* 2010-04-19 fify rewrite rt_hw_interrupt_disable/enable fuction
* 2010-04-20 fify move peripheral ISR to bsp/interrupts.s34
*/
RSEG CSTACK
RSEG ISTACK
RSEG CODE(1)
EXTERN rt_interrupt_from_thread
EXTERN rt_interrupt_to_thread
PUBLIC rt_hw_interrupt_disable
PUBLIC rt_hw_interrupt_enable
PUBLIC rt_hw_context_switch_to
PUBLIC os_context_switch
;rt_base_t rt_hw_interrupt_disable(): return the current FLG in R0,
;then clear the I flag to mask maskable interrupts.
rt_hw_interrupt_disable:
STC FLG, R0 ;fify 20100419
FCLR I
RTS
;void rt_hw_interrupt_enable(rt_base_t level): restore the FLG value
;(in R0) previously returned by rt_hw_interrupt_disable.
rt_hw_interrupt_enable:
LDC R0, FLG ;fify 20100419
RTS
.EVEN
;Software-interrupt context switch: save the remaining registers, store
;the current ISP into *rt_interrupt_from_thread, load the ISP from
;*rt_interrupt_to_thread, restore and REIT into the new thread.
os_context_switch:
PUSHM R0,R1,R2,R3,A0,A1,SB,FB
MOV.W rt_interrupt_from_thread, A0
STC ISP, [A0] ;from_thread->sp = ISP
MOV.W rt_interrupt_to_thread, A0
LDC [A0], ISP ;ISP = to_thread->sp
POPM R0,R1,R2,R3,A0,A1,SB,FB ; Restore registers from the new task's stack
REIT ; Return from interrupt
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
* this fucntion is used to perform the first thread switch
*/
;First-ever thread switch (nothing to save): R0 = &to_thread->sp; adopt
;the thread's prepared stack and REIT into its stored frame.
rt_hw_context_switch_to:
MOV.W R0, A0
LDC [A0], ISP ;ISP = to_thread->sp
POPM R0,R1,R2,R3,A0,A1,SB,FB
REIT
END
|
vandercookking/h7_device_RTT
| 1,219
|
rt-thread/libcpu/m16c/m16c62p/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-04-09 fify the first version
* 2010-04-19 fify rewrite rt_hw_interrupt_disable/enable fuction
* 2010-04-20 fify move peripheral ISR to bsp/interrupts.s34
*/
.section .text
.globl _rt_interrupt_from_thread
.globl _rt_interrupt_to_thread
.global _os_context_switch
.type _os_context_switch, @function
/* Software-interrupt context switch: save the remaining registers, swap
   ISP between *_rt_interrupt_from_thread and *_rt_interrupt_to_thread,
   restore and REIT into the new thread. */
_os_context_switch:
PUSHM R0,R1,R2,R3,A0,A1,SB,FB
MOV.W _rt_interrupt_from_thread, A0
STC ISP, [A0]
MOV.W _rt_interrupt_to_thread, A0
LDC [A0], ISP
POPM R0,R1,R2,R3,A0,A1,SB,FB ; Restore registers from the new task's stack
REIT ; Return from interrupt
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* this fucntion is used to perform the first thread switch
*/
.global _rt_hw_context_switch_to
.type _rt_hw_context_switch_to, @function
/* void rt_hw_context_switch_to(rt_uint32 to): first-ever thread switch.
   Fetches the "to" argument from the caller's frame (0x5[FB] after
   ENTER -- presumably where this toolchain passes it; verify against the
   compiler's calling convention), loads the thread's ISP and REITs into
   its prepared context. */
_rt_hw_context_switch_to:
ENTER #0x0
MOV.W 0x5[FB], A0
LDC [A0], ISP
POPM R0,R1,R2,R3,A0,A1,SB,FB
REIT
.end
|
vandercookking/h7_device_RTT
| 5,972
|
rt-thread/libcpu/nios/nios_ii/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-02-14 aozima first implementation for Nios II
* 2011-02-20 aozima fix context&switch bug
*/
/**
* @addtogroup NIOS_II
*/
/*@{*/
.text
.set noat
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the previous status register in r2 (Nios II return register),
 * then clears status (PIE) to disable interrupts.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
rdctl r2, status /* return status */
wrctl status, zero /* disable interrupt */
ret
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * r4 (first argument) = status value previously returned by disable.
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
wrctl status, r4 /* enable interrupt by argument */
ret
/* void rt_hw_context_switch_interrupt_do(void)
 * Entered from the exception exit path (vector.S) when an interrupt
 * requested a thread switch.  Builds a 72-byte thread frame:
 *   0(sp)=resume pc, 4=r2, 8=r3, 12=r4, 16..56=r5-r7,r16-r23,
 *   60=fp, 64=ra, 68=status.
 * The resume pc comes from rt_current_thread_entry, recorded by
 * rt_hw_context_switch_interrupt from the interrupted ea. */
.global rt_hw_context_switch_interrupt_do
.type rt_hw_context_switch_interrupt_do, %function
rt_hw_context_switch_interrupt_do:
/* save from thread */
addi sp,sp,-72
/* first save r2, freeing it to carry the status value */
stw r2, 4(sp)
/* save status */
/* when the interrupt happen,the interrupt is enable */
movi r2, 1
stw r2, 68(sp) /* status */
stw r3, 8(sp)
stw r4, 12(sp)
/* get & save from thread pc */
ldw r4,%gprel(rt_current_thread_entry)(gp)
stw r4, 0(sp) /* thread pc */
stw r5, 16(sp)
stw r6, 20(sp)
stw r7, 24(sp)
stw r16, 28(sp)
stw r17, 32(sp)
stw r18, 36(sp)
stw r19, 40(sp)
stw r20, 44(sp)
stw r21, 48(sp)
stw r22, 52(sp)
stw r23, 56(sp)
stw fp, 60(sp)
stw ra, 64(sp)
/* save from thread sp */
/* rt_interrupt_from_thread = &from_thread->sp */
ldw r4, %gprel(rt_interrupt_from_thread)(gp)
/* *r4(from_thread->sp) = sp */
stw sp, (r4)
/* clear rt_thread_switch_interrupt_flag */
/* rt_thread_switch_interrupt_flag = 0 */
stw zero,%gprel(rt_thread_switch_interrupt_flag)(gp)
/* load to thread sp */
/* r4 = rt_interrupt_to_thread(&to_thread->sp) */
ldw r4, %gprel(rt_interrupt_to_thread)(gp)
/* sp = to_thread->sp */
ldw sp, (r4)
/* restore the target thread from the same 72-byte frame layout */
ldw r2, 68(sp) /* status */
wrctl estatus, r2
ldw ea, 0(sp) /* thread pc */
ldw r2, 4(sp)
ldw r3, 8(sp)
ldw r4, 12(sp)
ldw r5, 16(sp)
ldw r6, 20(sp)
ldw r7, 24(sp)
ldw r16, 28(sp)
ldw r17, 32(sp)
ldw r18, 36(sp)
ldw r19, 40(sp)
ldw r20, 44(sp)
ldw r21, 48(sp)
ldw r22, 52(sp)
ldw r23, 56(sp)
ldw fp, 60(sp)
ldw ra, 64(sp)
addi sp, sp, 72
/* estatus --> status,ea --> pc */
eret
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r4: from (&from_thread->sp)
 * r5: to   (&to_thread->sp)
 * Voluntary switch: ra is stored in the frame's pc slot (0(sp)) so the
 * thread resumes right after its call to this function; eret then
 * returns into the new thread via estatus/ea.
 */
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch:
/* save from thread */
addi sp,sp,-72
/* first save r2, freeing it to carry the status value */
stw r2, 4(sp)
/* save status */
rdctl r2, status
stw r2, 68(sp) /* status */
stw ra, 0(sp) /* return from rt_hw_context_switch */
stw r3, 8(sp)
stw r4, 12(sp)
stw r5, 16(sp)
stw r6, 20(sp)
stw r7, 24(sp)
stw r16, 28(sp)
stw r17, 32(sp)
stw r18, 36(sp)
stw r19, 40(sp)
stw r20, 44(sp)
stw r21, 48(sp)
stw r22, 52(sp)
stw r23, 56(sp)
stw fp, 60(sp)
stw ra, 64(sp)
/* save form thread sp */
/* from_thread->sp(r4) = sp */
stw sp, (r4)
/* update rt_interrupt_from_thread */
/* rt_interrupt_from_thread = r4(from_thread->sp) */
stw r4,%gprel(rt_interrupt_from_thread)(gp)
/* update rt_interrupt_to_thread */
/* rt_interrupt_to_thread = r5 */
stw r5,%gprel(rt_interrupt_to_thread)(gp)
/* get to thread sp */
/* sp = rt_interrupt_to_thread(r5:to_thread->sp) */
ldw sp, (r5)
/* restore the target thread (same frame layout as the save above) */
ldw r2, 68(sp) /* status */
wrctl estatus, r2
ldw ea, 0(sp) /* thread pc */
ldw r2, 4(sp)
ldw r3, 8(sp)
ldw r4, 12(sp)
ldw r5, 16(sp)
ldw r6, 20(sp)
ldw r7, 24(sp)
ldw r16, 28(sp)
ldw r17, 32(sp)
ldw r18, 36(sp)
ldw r19, 40(sp)
ldw r20, 44(sp)
ldw r21, 48(sp)
ldw r22, 52(sp)
ldw r23, 56(sp)
ldw fp, 60(sp)
ldw ra, 64(sp)
addi sp, sp, 72
/* estatus --> status,ea --> pc */
eret
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * r4: from (&from_thread->sp)
 * r5: to   (&to_thread->sp)
 * Called during interrupt handling: only records the request.  The real
 * switch is done by rt_hw_context_switch_interrupt_do on exception exit.
 * If a switch is already pending, only the destination is updated so the
 * original "from" thread remains the one whose context gets saved.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
rt_hw_context_switch_interrupt:
/* if( rt_thread_switch_interrupt_flag != 0 ) _from_thread_not_change */
ldw r2,%gprel(rt_thread_switch_interrupt_flag)(gp)
bne r2,zero,_from_thread_not_change
_from_thread_change:
/* save ea -> rt_current_thread_entry (ea-4 = instruction to resume at) */
addi ea,ea,-4
stw ea,%gprel(rt_current_thread_entry)(gp)
/* set rt_thread_switch_interrupt_flag to 1 */
movi r2, 1
stw r2,%gprel(rt_thread_switch_interrupt_flag)(gp)
/* update rt_interrupt_from_thread */
stw r4,%gprel(rt_interrupt_from_thread)(gp)
_from_thread_not_change:
/* update rt_interrupt_to_thread */
stw r5,%gprel(rt_interrupt_to_thread)(gp)
ret
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r4: to (&to_thread->sp)
 * First-ever switch: nothing is saved, just restore the thread's
 * prepared 72-byte frame and eret into it.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
/* save to thread */
stw r4,%gprel(rt_interrupt_to_thread)(gp)
/* get sp */
ldw sp, (r4) // sp = *r4
ldw r2, 68(sp) /* status */
wrctl estatus, r2
ldw ea, 0(sp) /* thread entry */
ldw r2, 4(sp)
ldw r3, 8(sp)
ldw r4, 12(sp)
ldw r5, 16(sp)
ldw r6, 20(sp)
ldw r7, 24(sp)
ldw r16, 28(sp)
ldw r17, 32(sp)
ldw r18, 36(sp)
ldw r19, 40(sp)
ldw r20, 44(sp)
ldw r21, 48(sp)
ldw r22, 52(sp)
ldw r23, 56(sp)
ldw fp, 60(sp)
ldw ra, 64(sp)
addi sp, sp, 72
/* estatus --> status,ea --> pc */
eret
/*@}*/
|
vandercookking/h7_device_RTT
| 1,119
|
rt-thread/libcpu/nios/nios_ii/vector.S
|
.set noat
/* Tail of the Nios II HAL exception funnel: restores the caller-saved
 * registers pushed by the exception entry stub and erets.  If an ISR
 * requested a thread switch, ea is redirected to
 * rt_hw_context_switch_interrupt_do (with estatus forced to 0 so that
 * routine runs with interrupts disabled) instead of the interrupted pc. */
.globl .Lexception_exit
.section .exceptions.exit.label
.Lexception_exit:
.section .exceptions.exit, "xa"
ldw r5, 68(sp)
/* get exception back */
ldw ea, 72(sp)
/* if(rt_thread_switch_interrupt_flag == 0) goto no_need_context */
ldw r4,%gprel(rt_thread_switch_interrupt_flag)(gp)
beq r4,zero,no_need_context
need_context:
movia ea, rt_hw_context_switch_interrupt_do /* divert eret into the switcher */
/* disable interrupt */
mov r5, zero
no_need_context:
ldw ra, 0(sp)
wrctl estatus, r5
/*
 * Leave a gap in the stack frame at 4(sp) for the muldiv handler to
 * store zero into.
 */
ldw r1, 8(sp)
ldw r2, 12(sp)
ldw r3, 16(sp)
ldw r4, 20(sp)
ldw r5, 24(sp)
ldw r6, 28(sp)
ldw r7, 32(sp)
ldw r8, 36(sp)
ldw r9, 40(sp)
ldw r10, 44(sp)
ldw r11, 48(sp)
ldw r12, 52(sp)
ldw r13, 56(sp)
ldw r14, 60(sp)
ldw r15, 64(sp)
addi sp, sp, 76
eret
|
vandercookking/h7_device_RTT
| 7,947
|
rt-thread/libcpu/risc-v/common/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2020/11/20 BalanceTWK Add FPU support
* 2022/12/28 WangShun Add macro to distinguish whether FPU is supported
* 2023/03/19 Flyingcys Add riscv_32e support
*/
#define __ASSEMBLY__
#include "cpuport.h"
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif
/*
 * rt_base_t rt_hw_interrupt_disable(void);
 * Atomically clears mstatus.MIE (bit 3) and returns the previous
 * mstatus in a0.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
csrrci a0, mstatus, 8
ret
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Writes back the mstatus value previously returned by
 * rt_hw_interrupt_disable, restoring the saved MIE state.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
csrw mstatus, a0
ret
/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch_to(rt_ubase_t to, stuct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * #endif
 * a0 --> to (&to_thread->sp)
 * a1 --> to_thread (SMP only)
 * First-ever switch: reset mscratch to the interrupt stack top, adopt
 * the thread's prepared frame and fall into the common restore path.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
la t0, __rt_rvstack
csrw mscratch,t0
LOAD sp, (a0)
#ifdef RT_USING_SMP
mv a0, a1
call rt_cpus_lock_status_restore
#endif
LOAD a0, 2 * REGBYTES(sp) /* saved MPIE word from the thread frame */
csrw mstatus, a0
j rt_hw_context_switch_exit
/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * #endif
 *
 * a0 --> from (&from_thread->sp)
 * a1 --> to   (&to_thread->sp)
 * a2 --> to_thread (SMP only)
 * Voluntary switch: builds the same frame layout the interrupt path
 * uses, so both are unwound by rt_hw_context_switch_exit / mret.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
/* saved from thread context
 * ra -> sp(0)   (resume pc: return address of this call)
 * ra -> sp(1)   (restored into x1)
 * mstatus.mie -> sp(2)  (stored as an MPIE mask, see save_mpie)
 * x(i) -> sp(i)
 */
#ifdef ARCH_RISCV_FPU
addi sp, sp, -32 * FREGBYTES
FSTORE f0, 0 * FREGBYTES(sp)
FSTORE f1, 1 * FREGBYTES(sp)
FSTORE f2, 2 * FREGBYTES(sp)
FSTORE f3, 3 * FREGBYTES(sp)
FSTORE f4, 4 * FREGBYTES(sp)
FSTORE f5, 5 * FREGBYTES(sp)
FSTORE f6, 6 * FREGBYTES(sp)
FSTORE f7, 7 * FREGBYTES(sp)
FSTORE f8, 8 * FREGBYTES(sp)
FSTORE f9, 9 * FREGBYTES(sp)
FSTORE f10, 10 * FREGBYTES(sp)
FSTORE f11, 11 * FREGBYTES(sp)
FSTORE f12, 12 * FREGBYTES(sp)
FSTORE f13, 13 * FREGBYTES(sp)
FSTORE f14, 14 * FREGBYTES(sp)
FSTORE f15, 15 * FREGBYTES(sp)
FSTORE f16, 16 * FREGBYTES(sp)
FSTORE f17, 17 * FREGBYTES(sp)
FSTORE f18, 18 * FREGBYTES(sp)
FSTORE f19, 19 * FREGBYTES(sp)
FSTORE f20, 20 * FREGBYTES(sp)
FSTORE f21, 21 * FREGBYTES(sp)
FSTORE f22, 22 * FREGBYTES(sp)
FSTORE f23, 23 * FREGBYTES(sp)
FSTORE f24, 24 * FREGBYTES(sp)
FSTORE f25, 25 * FREGBYTES(sp)
FSTORE f26, 26 * FREGBYTES(sp)
FSTORE f27, 27 * FREGBYTES(sp)
FSTORE f28, 28 * FREGBYTES(sp)
FSTORE f29, 29 * FREGBYTES(sp)
FSTORE f30, 30 * FREGBYTES(sp)
FSTORE f31, 31 * FREGBYTES(sp)
#endif
#ifndef __riscv_32e
addi sp, sp, -32 * REGBYTES
#else
addi sp, sp, -16 * REGBYTES
#endif
STORE sp, (a0) /* publish the saved frame: from_thread->sp = sp */
STORE x1, 0 * REGBYTES(sp)
STORE x1, 1 * REGBYTES(sp)
/* Translate current MIE into an MPIE mask: if interrupts were enabled,
 * store 0x80 (MPIE) so mret re-enables them on resume; else store 0. */
csrr a0, mstatus
andi a0, a0, 8
beqz a0, save_mpie
li a0, 0x80
save_mpie:
STORE a0, 2 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp)
STORE x5, 5 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
#endif
/* restore to thread context
 * sp(0) -> epc;
 * sp(1) -> ra;
 * sp(i) -> x(i)
 */
LOAD sp, (a1)
#ifdef RT_USING_SMP
mv a0, a2
call rt_cpus_lock_status_restore
#endif /*RT_USING_SMP*/
j rt_hw_context_switch_exit
#ifdef RT_USING_SMP
/*
 * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 *
 * a0 --> context (saved interrupt frame of the outgoing thread)
 * a1 --> from (&from_thread->sp)
 * a2 --> to   (&to_thread->sp)
 * a3 --> to_thread
 * SMP interrupt-context switch: record the outgoing frame, adopt the
 * incoming thread's stack and unwind through the common exit path.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
STORE a0, 0(a1) /* from_thread->sp = saved interrupt frame */
LOAD sp, 0(a2) /* sp = to_thread->sp */
move a0, a3
call rt_cpus_lock_status_restore
j rt_hw_context_switch_exit
#endif
/* Common restore/exit path: sp points at a full thread frame.  Reloads
 * mepc from slot 0, primes mstatus (MPP = machine mode; FS dirty when
 * FPU context is in use) plus the frame's saved MPIE word, restores all
 * integer (and FP) registers and mrets into the thread. */
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
mv a0, sp
csrr t0, mhartid
/* switch interrupt stack of current cpu */
la sp, __stack_start__
addi t1, t0, 1
li t2, __STACKSIZE__
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
call rt_signal_check
mv sp, a0
#endif
#endif
/* restore resume address: frame slot 0 -> mepc */
LOAD a0, 0 * REGBYTES(sp)
csrw mepc, a0
LOAD x1, 1 * REGBYTES(sp)
/* Base mstatus for mret: MPP=11 (machine); with FPU also FS=dirty. */
#ifdef ARCH_RISCV_FPU
li t0, 0x7800
#else
li t0, 0x1800
#endif
csrw mstatus, t0
LOAD a0, 2 * REGBYTES(sp)
csrs mstatus, a0 /* OR in the saved MPIE word */
LOAD x4, 4 * REGBYTES(sp)
LOAD x5, 5 * REGBYTES(sp)
LOAD x6, 6 * REGBYTES(sp)
LOAD x7, 7 * REGBYTES(sp)
LOAD x8, 8 * REGBYTES(sp)
LOAD x9, 9 * REGBYTES(sp)
LOAD x10, 10 * REGBYTES(sp)
LOAD x11, 11 * REGBYTES(sp)
LOAD x12, 12 * REGBYTES(sp)
LOAD x13, 13 * REGBYTES(sp)
LOAD x14, 14 * REGBYTES(sp)
LOAD x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 16 * REGBYTES(sp)
LOAD x17, 17 * REGBYTES(sp)
LOAD x18, 18 * REGBYTES(sp)
LOAD x19, 19 * REGBYTES(sp)
LOAD x20, 20 * REGBYTES(sp)
LOAD x21, 21 * REGBYTES(sp)
LOAD x22, 22 * REGBYTES(sp)
LOAD x23, 23 * REGBYTES(sp)
LOAD x24, 24 * REGBYTES(sp)
LOAD x25, 25 * REGBYTES(sp)
LOAD x26, 26 * REGBYTES(sp)
LOAD x27, 27 * REGBYTES(sp)
LOAD x28, 28 * REGBYTES(sp)
LOAD x29, 29 * REGBYTES(sp)
LOAD x30, 30 * REGBYTES(sp)
LOAD x31, 31 * REGBYTES(sp)
addi sp, sp, 32 * REGBYTES
#else
addi sp, sp, 16 * REGBYTES
#endif
#ifdef ARCH_RISCV_FPU
FLOAD f0, 0 * FREGBYTES(sp)
FLOAD f1, 1 * FREGBYTES(sp)
FLOAD f2, 2 * FREGBYTES(sp)
FLOAD f3, 3 * FREGBYTES(sp)
FLOAD f4, 4 * FREGBYTES(sp)
FLOAD f5, 5 * FREGBYTES(sp)
FLOAD f6, 6 * FREGBYTES(sp)
FLOAD f7, 7 * FREGBYTES(sp)
FLOAD f8, 8 * FREGBYTES(sp)
FLOAD f9, 9 * FREGBYTES(sp)
FLOAD f10, 10 * FREGBYTES(sp)
FLOAD f11, 11 * FREGBYTES(sp)
FLOAD f12, 12 * FREGBYTES(sp)
FLOAD f13, 13 * FREGBYTES(sp)
FLOAD f14, 14 * FREGBYTES(sp)
FLOAD f15, 15 * FREGBYTES(sp)
FLOAD f16, 16 * FREGBYTES(sp)
FLOAD f17, 17 * FREGBYTES(sp)
FLOAD f18, 18 * FREGBYTES(sp)
FLOAD f19, 19 * FREGBYTES(sp)
FLOAD f20, 20 * FREGBYTES(sp)
FLOAD f21, 21 * FREGBYTES(sp)
FLOAD f22, 22 * FREGBYTES(sp)
FLOAD f23, 23 * FREGBYTES(sp)
FLOAD f24, 24 * FREGBYTES(sp)
FLOAD f25, 25 * FREGBYTES(sp)
FLOAD f26, 26 * FREGBYTES(sp)
FLOAD f27, 27 * FREGBYTES(sp)
FLOAD f28, 28 * FREGBYTES(sp)
FLOAD f29, 29 * FREGBYTES(sp)
FLOAD f30, 30 * FREGBYTES(sp)
FLOAD f31, 31 * FREGBYTES(sp)
addi sp, sp, 32 * FREGBYTES
#endif
mret
|
vandercookking/h7_device_RTT
| 6,174
|
rt-thread/libcpu/risc-v/common/interrupt_gcc.S
|
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023/01/17 WangShun The first version
* 2023/03/19 Flyingcys Add riscv_32e support
* 2023/08/09 HPMicro Fix the issue t0 was modified unexpectedly before being saved
*/
#define __ASSEMBLY__
#include "cpuport.h"

/*
 * SW_handler -- machine-mode trap entry for the common RISC-V port.
 *
 * Saves the full interrupted-thread context on that thread's stack,
 * runs the C interrupt path on a dedicated interrupt stack (swapped in
 * via mscratch), then -- if the ISR requested a reschedule through
 * rt_thread_switch_interrupt_flag -- switches the stack pointer to the
 * "to" thread before restoring context and executing mret.
 */
.section .text.entry, "ax"
#if defined(SOC_SERIES_GD32VF103V)
.align 6
#else
.align 2
#endif
.global SW_handler
SW_handler:
/* clear mstatus.MIE (bit 3): keep interrupts masked while building the frame */
csrci mstatus, 0x8
#ifdef ARCH_RISCV_FPU
/* push all 32 floating-point registers below the integer frame */
addi sp, sp, -32 * FREGBYTES
FSTORE f0, 0 * FREGBYTES(sp)
FSTORE f1, 1 * FREGBYTES(sp)
FSTORE f2, 2 * FREGBYTES(sp)
FSTORE f3, 3 * FREGBYTES(sp)
FSTORE f4, 4 * FREGBYTES(sp)
FSTORE f5, 5 * FREGBYTES(sp)
FSTORE f6, 6 * FREGBYTES(sp)
FSTORE f7, 7 * FREGBYTES(sp)
FSTORE f8, 8 * FREGBYTES(sp)
FSTORE f9, 9 * FREGBYTES(sp)
FSTORE f10, 10 * FREGBYTES(sp)
FSTORE f11, 11 * FREGBYTES(sp)
FSTORE f12, 12 * FREGBYTES(sp)
FSTORE f13, 13 * FREGBYTES(sp)
FSTORE f14, 14 * FREGBYTES(sp)
FSTORE f15, 15 * FREGBYTES(sp)
FSTORE f16, 16 * FREGBYTES(sp)
FSTORE f17, 17 * FREGBYTES(sp)
FSTORE f18, 18 * FREGBYTES(sp)
FSTORE f19, 19 * FREGBYTES(sp)
FSTORE f20, 20 * FREGBYTES(sp)
FSTORE f21, 21 * FREGBYTES(sp)
FSTORE f22, 22 * FREGBYTES(sp)
FSTORE f23, 23 * FREGBYTES(sp)
FSTORE f24, 24 * FREGBYTES(sp)
FSTORE f25, 25 * FREGBYTES(sp)
FSTORE f26, 26 * FREGBYTES(sp)
FSTORE f27, 27 * FREGBYTES(sp)
FSTORE f28, 28 * FREGBYTES(sp)
FSTORE f29, 29 * FREGBYTES(sp)
FSTORE f30, 30 * FREGBYTES(sp)
FSTORE f31, 31 * FREGBYTES(sp)
#endif
/* save all from thread context (RV32E has only x0-x15, so a 16-slot frame) */
#ifndef __riscv_32e
addi sp, sp, -32 * REGBYTES
#else
addi sp, sp, -16 * REGBYTES
#endif
/* save x5 (t0) FIRST: it is used as scratch immediately below
 * (this ordering is the HPMicro fix noted in the change log) */
STORE x5, 5 * REGBYTES(sp)
STORE x1, 1 * REGBYTES(sp)
/* Mandatory set the MPIE of mstatus: slot 2 holds the value OR-ed back into
 * mstatus on exit, so mret re-enables interrupts in the resumed thread */
li t0, 0x80
STORE t0, 2 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
#endif
/* switch to interrupt stack (mscratch holds it; sp<->mscratch swap) */
csrrw sp,mscratch,sp
/* interrupt handle */
call rt_interrupt_enter
/* Do the work after saving the above (board-specific dispatch) */
jal rt_hw_do_after_save_above
call rt_interrupt_leave
/* switch back to the interrupted thread's stack */
csrrw sp,mscratch,sp
/* Determine whether the ISR requested a thread switch */
la t0, rt_thread_switch_interrupt_flag
lw t2, 0(t0)
beqz t2, 1f
/* clear the flag of rt_thread_switch_interrupt_flag */
sw zero, 0(t0)
/* frame slot 0 holds the resume PC (mepc) */
csrr a0, mepc
STORE a0, 0 * REGBYTES(sp)
la t0, rt_interrupt_from_thread
LOAD t1, 0(t0)
STORE sp, 0(t1)
la t0, rt_interrupt_to_thread
LOAD t1, 0(t0)
LOAD sp, 0(t1)
LOAD a0, 0 * REGBYTES(sp)
csrw mepc, a0
1:
LOAD x1, 1 * REGBYTES(sp)
/* Set the mode after MRET: MPP=11 (machine mode) */
li t0, 0x1800
csrs mstatus, t0
/* OR in the saved status bits (MPIE) stored in slot 2 */
LOAD t0, 2 * REGBYTES(sp)
csrs mstatus, t0
LOAD x4, 4 * REGBYTES(sp)
LOAD x5, 5 * REGBYTES(sp)
LOAD x6, 6 * REGBYTES(sp)
LOAD x7, 7 * REGBYTES(sp)
LOAD x8, 8 * REGBYTES(sp)
LOAD x9, 9 * REGBYTES(sp)
LOAD x10, 10 * REGBYTES(sp)
LOAD x11, 11 * REGBYTES(sp)
LOAD x12, 12 * REGBYTES(sp)
LOAD x13, 13 * REGBYTES(sp)
LOAD x14, 14 * REGBYTES(sp)
LOAD x15, 15 * REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 16 * REGBYTES(sp)
LOAD x17, 17 * REGBYTES(sp)
LOAD x18, 18 * REGBYTES(sp)
LOAD x19, 19 * REGBYTES(sp)
LOAD x20, 20 * REGBYTES(sp)
LOAD x21, 21 * REGBYTES(sp)
LOAD x22, 22 * REGBYTES(sp)
LOAD x23, 23 * REGBYTES(sp)
LOAD x24, 24 * REGBYTES(sp)
LOAD x25, 25 * REGBYTES(sp)
LOAD x26, 26 * REGBYTES(sp)
LOAD x27, 27 * REGBYTES(sp)
LOAD x28, 28 * REGBYTES(sp)
LOAD x29, 29 * REGBYTES(sp)
LOAD x30, 30 * REGBYTES(sp)
LOAD x31, 31 * REGBYTES(sp)
addi sp, sp, 32 * REGBYTES
#else
addi sp, sp, 16 * REGBYTES
#endif
#ifdef ARCH_RISCV_FPU
/* pop the floating-point half of the frame */
FLOAD f0, 0 * FREGBYTES(sp)
FLOAD f1, 1 * FREGBYTES(sp)
FLOAD f2, 2 * FREGBYTES(sp)
FLOAD f3, 3 * FREGBYTES(sp)
FLOAD f4, 4 * FREGBYTES(sp)
FLOAD f5, 5 * FREGBYTES(sp)
FLOAD f6, 6 * FREGBYTES(sp)
FLOAD f7, 7 * FREGBYTES(sp)
FLOAD f8, 8 * FREGBYTES(sp)
FLOAD f9, 9 * FREGBYTES(sp)
FLOAD f10, 10 * FREGBYTES(sp)
FLOAD f11, 11 * FREGBYTES(sp)
FLOAD f12, 12 * FREGBYTES(sp)
FLOAD f13, 13 * FREGBYTES(sp)
FLOAD f14, 14 * FREGBYTES(sp)
FLOAD f15, 15 * FREGBYTES(sp)
FLOAD f16, 16 * FREGBYTES(sp)
FLOAD f17, 17 * FREGBYTES(sp)
FLOAD f18, 18 * FREGBYTES(sp)
FLOAD f19, 19 * FREGBYTES(sp)
FLOAD f20, 20 * FREGBYTES(sp)
FLOAD f21, 21 * FREGBYTES(sp)
FLOAD f22, 22 * FREGBYTES(sp)
FLOAD f23, 23 * FREGBYTES(sp)
FLOAD f24, 24 * FREGBYTES(sp)
FLOAD f25, 25 * FREGBYTES(sp)
FLOAD f26, 26 * FREGBYTES(sp)
FLOAD f27, 27 * FREGBYTES(sp)
FLOAD f28, 28 * FREGBYTES(sp)
FLOAD f29, 29 * FREGBYTES(sp)
FLOAD f30, 30 * FREGBYTES(sp)
FLOAD f31, 31 * FREGBYTES(sp)
addi sp, sp, 32 * FREGBYTES
#endif
mret
|
vandercookking/h7_device_RTT
| 1,857
|
rt-thread/libcpu/risc-v/juicevm/interrupt_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021/04/24 Juice The first version
*/
#include "cpuport.h"

/*
 * trap_entry -- JuiceVM machine-mode trap entry.
 * Saves the interrupted thread's context on its own stack, calls the C
 * trap handler, then switches threads if the handler requested it.
 * Exit is performed by rt_hw_context_switch_exit (tail call).
 */
.section .text.entry
.align 2
.global trap_entry
trap_entry:
/* save thread context to thread stack (32-slot frame, slot 3 unused) */
addi sp, sp, -32 * REGBYTES
STORE x1, 1 * REGBYTES(sp)
/* x1 (ra) is free now -- reuse it as scratch for the CSR reads */
csrr x1, mstatus
STORE x1, 2 * REGBYTES(sp)
csrr x1, mepc
STORE x1, 0 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp)
STORE x5, 5 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
/* remember the from-thread sp in s0 (no separate interrupt stack here;
 * the C handlers run on the thread stack) */
move s0, sp
/* handle interrupt: handle_trap(mcause, mepc, frame) */
call rt_interrupt_enter
csrr a0, mcause
csrr a1, mepc
mv a2, s0
call handle_trap
call rt_interrupt_leave
/* restore the from_thread stack pointer */
move sp, s0
/* need to switch new thread */
la s0, rt_thread_switch_interrupt_flag
lw s2, 0(s0)
beqz s2, spurious_interrupt
sw zero, 0(s0)
/* save from-thread sp into its TCB, load to-thread sp from its TCB */
la s0, rt_interrupt_from_thread
LOAD s1, 0(s0)
STORE sp, 0(s1)
la s0, rt_interrupt_to_thread
LOAD s1, 0(s0)
LOAD sp, 0(s1)
spurious_interrupt:
tail rt_hw_context_switch_exit
|
vandercookking/h7_device_RTT
| 2,013
|
rt-thread/libcpu/risc-v/virt64/startup_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Add SMP support
* 2020/6/12 Xim Port to QEMU and remove SMP support
*/
#include <encoding.h>
#include <cpuport.h>

/*
 * virt64 S-mode boot entry.
 * Records the boot hart id, programs stvec, clears the register file,
 * disables FPU/vector state, sets up gp/sp, then enters C code.
 */
.data
.global boot_hartid /* global variable rt_boot_hartid in .data section */
boot_hartid:
.word 0xdeadbeef
.global _start
.section ".start", "ax"
_start:
j 1f
.word 0xdeadbeef
.align 3
.global g_wake_up
g_wake_up:
.dword 1
.dword 0
1:
/* save hartid */
la t0, boot_hartid /* global variable rt_boot_hartid */
mv t1, a0 /* get hartid in S-mode from a0 register */
sw t1, (t0) /* store low 32 bits of t1 at the address held in t0 */
/* clear Interrupt Registers */
csrw sie, 0
csrw sip, 0
/* set Trap Vector Base Address Register */
la t0, trap_entry
csrw stvec, t0
/* zero the whole integer register file for a deterministic start */
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
/* set to disable FPU and vector unit (sstatus.FS / sstatus.VS = Off) */
li t0, SSTATUS_FS + SSTATUS_VS
csrc sstatus, t0
/* allow S-mode access to U-mode pages (sstatus.SUM) */
li t0, SSTATUS_SUM
csrs sstatus, t0
.option push
.option norelax
la gp, __global_pointer$
.option pop
/* removed SMP support here */
la sp, __stack_start__
li t0, __STACKSIZE__
add sp, sp, t0
/**
 * sscratch is always zero on kernel mode
 */
csrw sscratch, zero
call init_bss
call sbi_init
j primary_cpu_entry
|
vandercookking/h7_device_RTT
| 2,620
|
rt-thread/libcpu/risc-v/virt64/context_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2021/02/02 lizhirui Add userspace support
* 2022/10/22 WangXiaoyao Support User mode RVV;
* Trimming process switch context
*/
#include "cpuport.h"
#include "stackframe.h"

/* push / pop a single 8-byte register on sp */
.macro PUSH_8 reg
addi sp, sp, -8
STORE \reg, (sp)
.endm
.macro POP_8 reg
LOAD \reg, (sp)
addi sp, sp, 8
.endm
/*
 * RESERVE_CONTEXT -- save the cooperative-switch context: tp, ra,
 * s0-s11, then sstatus with SPP forced to 1 so the matching sret
 * returns to supervisor mode.  Only callee-saved state is kept; the
 * caller-saved registers are dead across rt_hw_context_switch.
 */
.macro RESERVE_CONTEXT
PUSH_8 tp
PUSH_8 ra
PUSH_8 s0
PUSH_8 s1
PUSH_8 s2
PUSH_8 s3
PUSH_8 s4
PUSH_8 s5
PUSH_8 s6
PUSH_8 s7
PUSH_8 s8
PUSH_8 s9
PUSH_8 s10
PUSH_8 s11
csrr s11, sstatus
li s10, (SSTATUS_SPP)
or s11, s11, s10
PUSH_8 s11
.endm
/*
 * RESTORE_CONTEXT -- inverse of RESERVE_CONTEXT.  Pops sstatus first,
 * then s11..s0, ra, tp, and finally loads sepc with the saved return
 * address so sret resumes in the caller of rt_hw_context_switch.
 */
.macro RESTORE_CONTEXT
POP_8 s11
csrw sstatus, s11
POP_8 s11
POP_8 s10
POP_8 s9
POP_8 s8
POP_8 s7
POP_8 s6
POP_8 s5
POP_8 s4
POP_8 s3
POP_8 s2
POP_8 s1
POP_8 s0
POP_8 ra
POP_8 tp
csrw sepc, ra
.endm
/*
 * void rt_hw_context_switch_to(rt_ubase_t to);
 *
 * a0 --> to SP pointer
 *
 * First-ever switch: no "from" context is saved.  Loads the to-thread
 * stack pointer, optionally switches address space, restores context
 * and enters the thread via sret.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LOAD sp, (a0)
la s0, rt_current_thread
LOAD s1, (s0)
#ifdef RT_USING_SMART
/* switch to the new thread's address space (lwp) */
mv a0, s1
jal lwp_aspace_switch
#endif
RESTORE_CONTEXT
sret
/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 *
 * a0 --> from SP pointer
 * a1 --> to SP pointer
 *
 * It should only be used on local interrupt disable
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
/* save callee-saved context of the current thread, record its sp */
RESERVE_CONTEXT
STORE sp, (a0)
// restore to thread SP
LOAD sp, (a1)
// restore Address Space
la s0, rt_current_thread
LOAD s1, (s0)
#ifdef RT_USING_SMART
mv a0, s1
jal lwp_aspace_switch
#endif
RESTORE_CONTEXT
sret
#ifdef ENABLE_VECTOR
/**
 * rt_hw_vector_ctx_save -- save RVV vector state into a frame.
 * @param a0 pointer to frame bottom
 */
.global rt_hw_vector_ctx_save
rt_hw_vector_ctx_save:
SAVE_VECTOR a0
ret
/**
 * rt_hw_vector_ctx_restore -- restore RVV vector state from a frame.
 * @param a0 pointer to frame bottom
 */
.global rt_hw_vector_ctx_restore
rt_hw_vector_ctx_restore:
RESTORE_VECTOR a0
ret
/* turn the vector unit off (sstatus.VS = Off) */
.global rt_hw_disable_vector
rt_hw_disable_vector:
li t0, SSTATUS_VS
csrc sstatus, t0
ret
/* turn the vector unit on (set sstatus.VS) */
.global rt_hw_enable_vector
rt_hw_enable_vector:
li t0, SSTATUS_VS
csrs sstatus, t0
ret
#endif /* ENABLE_VECTOR */
|
vandercookking/h7_device_RTT
| 2,348
|
rt-thread/libcpu/risc-v/virt64/interrupt_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/02 Bernard The first version
* 2018/12/27 Jesven Add SMP schedule
* 2021/02/02 lizhirui Add userspace support
* 2021/12/24 JasonHu Add user setting save/restore
* 2022/10/22 WangXiaoyao Support kernel mode RVV;
* Rewrite trap handling routine
*/
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"

/*
 * virt64 S-mode trap entry.
 * sscratch convention: zero while executing in kernel mode, the kernel
 * stack pointer while in user mode.  That lets the entry code tell
 * which mode the trap came from.
 */
.align 2
.global trap_entry
.global debug_check_sp
trap_entry:
// distinguish exception from kernel or user:
// after the swap, sp != 0 means sscratch held a kernel sp (trap from user)
csrrw sp, sscratch, sp
bnez sp, _save_context
// BE REALLY careful with sscratch,
// if it's wrong, we could looping here forever
// or accessing random memory and seeing things totally
// messy after a long time and don't even know why
_from_kernel:
// sscratch now holds the original (kernel) sp from the swap above
csrr sp, sscratch
j _save_context
_save_context:
SAVE_ALL
// clear sscratch to say 'now in kernel mode'
csrw sscratch, zero
RESTORE_SYS_GP
// now we are ready to enter interrupt / exception handler
_distinguish_syscall:
csrr t0, scause
#ifdef RT_USING_SMART
// TODO swap 8 with config macro name
li t1, 8
beq t0, t1, syscall_entry
// syscall never return here
#endif
_handle_interrupt_and_exception:
// handle_trap(scause, stval, sepc, frame)
mv a0, t0
csrrc a1, stval, zero
csrr a2, sepc
// sp as exception frame pointer
mv a3, sp
call handle_trap
_interrupt_exit:
la s0, rt_thread_switch_interrupt_flag
lw s2, 0(s0)
beqz s2, _resume_execution
sw zero, 0(s0)
_context_switch:
// reuse the cooperative switch path with the saved from/to sp slots
la t0, rt_interrupt_from_thread
LOAD a0, 0(t0)
la t0, rt_interrupt_to_thread
LOAD a1, 0(t0)
jal rt_hw_context_switch
_resume_execution:
#ifdef RT_USING_SMART
// returning to user mode? (saved sstatus.SPP == 0)
LOAD t0, FRAME_OFF_SSTATUS(sp)
andi t0, t0, SSTATUS_SPP
beqz t0, arch_ret_to_user
#endif
_resume_kernel:
RESTORE_ALL
csrw sscratch, zero
sret
/* rt_base_t rt_hw_interrupt_enable(rt_base_t level): restore saved sstatus bits */
.global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
csrs sstatus, a0 /* restore to old csr */
jr ra
/* rt_base_t rt_hw_interrupt_disable(void): clear SIE, return old sstatus */
.global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
csrrci a0, sstatus, 2 /* clear SIE */
jr ra
|
vandercookking/h7_device_RTT
| 2,289
|
rt-thread/libcpu/risc-v/k210/startup_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Add SMP support
*/
#define __ASSEMBLY__
#define MSTATUS_FS 0x00006000U /* initial state of FPU */
#include <cpuport.h>

/*
 * K210 M-mode boot entry (dual-hart).
 * Sets mtvec, zeroes the integer and FP register files, assigns each
 * hart its own stack slice, then routes hart 0 to primary_cpu_entry
 * and the other hart to the secondary spin/boot path.
 */
.global _start
.section ".start", "ax"
_start:
j 1f
.word 0xdeadbeef
.align 3
.global g_wake_up
g_wake_up:
.dword 1
.dword 0
1:
/* no delegation; mask and clear all M-mode interrupts */
csrw mideleg, 0
csrw medeleg, 0
csrw mie, 0
csrw mip, 0
la t0, trap_entry
csrw mtvec, t0
/* zero the integer register file */
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
/* enable the FPU (mstatus.FS) and zero the FP register file;
 * M-mode interrupts remain disabled (mstatus.MIE is 0 out of reset) */
li t0, MSTATUS_FS
csrs mstatus, t0
fssr x0
fmv.w.x f0, x0
fmv.w.x f1, x0
fmv.w.x f2, x0
fmv.w.x f3, x0
fmv.w.x f4, x0
fmv.w.x f5, x0
fmv.w.x f6, x0
fmv.w.x f7, x0
fmv.w.x f8, x0
fmv.w.x f9, x0
fmv.w.x f10,x0
fmv.w.x f11,x0
fmv.w.x f12,x0
fmv.w.x f13,x0
fmv.w.x f14,x0
fmv.w.x f15,x0
fmv.w.x f16,x0
fmv.w.x f17,x0
fmv.w.x f18,x0
fmv.w.x f19,x0
fmv.w.x f20,x0
fmv.w.x f21,x0
fmv.w.x f22,x0
fmv.w.x f23,x0
fmv.w.x f24,x0
fmv.w.x f25,x0
fmv.w.x f26,x0
fmv.w.x f27,x0
fmv.w.x f28,x0
fmv.w.x f29,x0
fmv.w.x f30,x0
fmv.w.x f31,x0
.option push
.option norelax
la gp, __global_pointer$
.option pop
/* get cpu id */
csrr a0, mhartid
la sp, __stack_start__
addi t1, a0, 1
li t2, __STACKSIZE__
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
/* other cpu core, jump to cpu entry directly */
bnez a0, secondary_cpu_entry
tail primary_cpu_entry
secondary_cpu_entry:
#ifdef RT_USING_SMP
/* spin until the primary hart publishes the 0xa55a boot flag */
la a0, secondary_boot_flag
ld a0, 0(a0)
li a1, 0xa55a
beq a0, a1, 1f
#endif
j secondary_cpu_entry
#ifdef RT_USING_SMP
1:
tail secondary_cpu_c_start
.data
.global secondary_boot_flag
.align 3
secondary_boot_flag:
.dword 0
#endif
|
vandercookking/h7_device_RTT
| 3,793
|
rt-thread/libcpu/risc-v/k210/interrupt_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/02 Bernard The first version
* 2018/12/27 Jesven Add SMP schedule
*/
#define __ASSEMBLY__
#include "cpuport.h"

/*
 * K210 M-mode trap entry.
 * Saves the full thread context (optionally FP) on the thread stack,
 * switches to a per-hart interrupt stack, runs the C handler, then
 * either lets the SMP scheduler pick the next thread or performs the
 * single-core from/to switch requested by the ISR.
 */
.section .text.entry
.align 2
.global trap_entry
trap_entry:
#ifdef ARCH_RISCV_FPU
/* push all 32 FP registers below the integer frame */
addi sp, sp, -32 * FREGBYTES
FSTORE f0, 0 * FREGBYTES(sp)
FSTORE f1, 1 * FREGBYTES(sp)
FSTORE f2, 2 * FREGBYTES(sp)
FSTORE f3, 3 * FREGBYTES(sp)
FSTORE f4, 4 * FREGBYTES(sp)
FSTORE f5, 5 * FREGBYTES(sp)
FSTORE f6, 6 * FREGBYTES(sp)
FSTORE f7, 7 * FREGBYTES(sp)
FSTORE f8, 8 * FREGBYTES(sp)
FSTORE f9, 9 * FREGBYTES(sp)
FSTORE f10, 10 * FREGBYTES(sp)
FSTORE f11, 11 * FREGBYTES(sp)
FSTORE f12, 12 * FREGBYTES(sp)
FSTORE f13, 13 * FREGBYTES(sp)
FSTORE f14, 14 * FREGBYTES(sp)
FSTORE f15, 15 * FREGBYTES(sp)
FSTORE f16, 16 * FREGBYTES(sp)
FSTORE f17, 17 * FREGBYTES(sp)
FSTORE f18, 18 * FREGBYTES(sp)
FSTORE f19, 19 * FREGBYTES(sp)
FSTORE f20, 20 * FREGBYTES(sp)
FSTORE f21, 21 * FREGBYTES(sp)
FSTORE f22, 22 * FREGBYTES(sp)
FSTORE f23, 23 * FREGBYTES(sp)
FSTORE f24, 24 * FREGBYTES(sp)
FSTORE f25, 25 * FREGBYTES(sp)
FSTORE f26, 26 * FREGBYTES(sp)
FSTORE f27, 27 * FREGBYTES(sp)
FSTORE f28, 28 * FREGBYTES(sp)
FSTORE f29, 29 * FREGBYTES(sp)
FSTORE f30, 30 * FREGBYTES(sp)
FSTORE f31, 31 * FREGBYTES(sp)
#endif
/* save thread context to thread stack (slot 0 = mepc, slot 2 = mstatus) */
addi sp, sp, -32 * REGBYTES
STORE x1, 1 * REGBYTES(sp)
/* x1 (ra) is saved; reuse it as scratch for the CSR reads */
csrr x1, mstatus
STORE x1, 2 * REGBYTES(sp)
csrr x1, mepc
STORE x1, 0 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp)
STORE x5, 5 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
/* keep from-thread sp in s0 before switching to the interrupt stack */
move s0, sp
/* get cpu id */
csrr t0, mhartid
/* switch interrupt stack of current cpu */
la sp, __stack_start__
addi t1, t0, 1
li t2, __STACKSIZE__
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
/* handle interrupt: handle_trap(mcause, mepc, frame) */
call rt_interrupt_enter
csrr a0, mcause
csrr a1, mepc
mv a2, s0
call handle_trap
call rt_interrupt_leave
#ifdef RT_USING_SMP
/* s0 --> sp: back to thread stack, let the SMP scheduler decide */
mv sp, s0
mv a0, s0
call rt_scheduler_do_irq_switch
tail rt_hw_context_switch_exit
#else
/* switch to from_thread stack */
move sp, s0
/* need to switch new thread */
la s0, rt_thread_switch_interrupt_flag
lw s2, 0(s0)
beqz s2, spurious_interrupt
sw zero, 0(s0)
la s0, rt_interrupt_from_thread
LOAD s1, 0(s0)
STORE sp, 0(s1)
la s0, rt_interrupt_to_thread
LOAD s1, 0(s0)
LOAD sp, 0(s1)
#endif
spurious_interrupt:
tail rt_hw_context_switch_exit
|
vandercookking/h7_device_RTT
| 1,836
|
rt-thread/libcpu/risc-v/t-head/c906/startup_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Add SMP support
* 2020/6/12 Xim Port to QEMU and remove SMP support
*/
#include <encoding.h>
#include <cpuport.h>

/*
 * C906 S-mode boot entry.
 *
 * FIX: boot_hartid previously read "boot_hartid: .int" with no operand,
 * which reserves ZERO bytes at the top of the default .text section.
 * The `sw t1, (t0)` below then overwrote whatever the linker placed at
 * that address.  Give the symbol real 4-byte storage in .data, matching
 * the virt64 port's layout.
 */
.data
.global boot_hartid /* global variable rt_boot_hartid in .data section */
boot_hartid:
.word 0xdeadbeef
.global _start
.section ".start", "ax"
_start:
j 1f
.word 0xdeadbeef
.align 3
.global g_wake_up
g_wake_up:
.dword 1
.dword 0
1:
/* save hartid */
la t0, boot_hartid /* global variable rt_boot_hartid */
mv t1, a0 /* get hartid in S-mode from a0 register */
sw t1, (t0) /* store low 32 bits of t1 at the address held in t0 */
/* clear Interrupt Registers */
csrw sie, 0
csrw sip, 0
/* set Trap Vector Base Address Register */
la t0, trap_entry
csrw stvec, t0
/* zero the whole integer register file for a deterministic start */
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
/* set to disable FPU (sstatus.FS = Off) */
li t0, SSTATUS_FS
csrc sstatus, t0
/* allow S-mode access to U-mode pages (sstatus.SUM) */
li t0, SSTATUS_SUM
csrs sstatus, t0
.option push
.option norelax
la gp, __global_pointer$
.option pop
/* removed SMP support here */
la sp, __stack_start__
li t0, __STACKSIZE__
add sp, sp, t0
/* kernel sp parked in sscratch for the trap entry's stack switch */
csrw sscratch, sp
call init_bss
call sbi_init
j primary_cpu_entry
|
vandercookking/h7_device_RTT
| 1,249
|
rt-thread/libcpu/risc-v/t-head/c906/context_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2021/02/02 lizhirui Add userspace support
*/
#include "cpuport.h"
#include "stackframe.h"

/*
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * a0 --> address of the to-thread's saved sp.
 * First switch ever: nothing is saved, the to-thread's full frame is
 * restored and entered via sret.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LOAD sp, (a0)
la s0, rt_current_thread
LOAD s1, (s0)
#ifdef RT_USING_SMART
/* switch to the new thread's address space (lwp) */
mv a0, s1
jal lwp_aspace_switch
#endif
RESTORE_ALL
sret
/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 *
 * a0 --> from
 * a1 --> to
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
/* keep the caller's sp so it can be stored in the frame's user_sp slot */
mv t2, sp
li t0, 0x120 // set SPIE and SPP = 1
csrs sstatus, t0 // if enter here, caller must be in system thread
csrw sepc, ra // return address becomes the resume PC
// saved from thread context
SAVE_ALL
STORE t2, 32 * REGBYTES(sp) // save user_sp
STORE sp, (a0)
// restore to thread context
LOAD sp, (a1)
la s0, rt_current_thread
LOAD s1, (s0)
#ifdef RT_USING_SMART
mv a0, s1
jal lwp_aspace_switch
#endif
RESTORE_ALL
sret
|
vandercookking/h7_device_RTT
| 2,607
|
rt-thread/libcpu/risc-v/t-head/c906/interrupt_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/02 Bernard The first version
* 2018/12/27 Jesven Add SMP schedule
* 2021/02/02 lizhirui Add userspace support
* 2021/12/24 JasonHu Add user setting save/restore
*/
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"

/*
 * C906 S-mode trap entry.
 * Runs handlers on a dedicated interrupt stack (__stack_cpu0), then --
 * if the ISR requested a switch -- COPIES the saved frame from the
 * interrupt stack onto the from-thread's kernel stack before switching
 * to the to-thread.
 */
.section .text.entry
.align 2
.global trap_entry
.extern __stack_cpu0
.extern get_current_thread_kernel_stack_top
trap_entry:
// backup sp (original sp parked in sscratch)
csrrw sp, sscratch, sp
// load interrupt stack
la sp, __stack_cpu0
// backup context
SAVE_ALL
RESTORE_SYS_GP
#ifdef RT_USING_SMART
// check syscall
csrr t0, scause
li t1, 8 // environment call from u-mode
beq t0, t1, syscall_entry
#endif
csrr a0, scause
csrrc a1, stval, zero
csrr a2, sepc
mv a3, sp
/* handle_trap(scause, stval, sepc, frame) */
call handle_trap
/* need to switch new thread */
la s0, rt_thread_switch_interrupt_flag
lw s2, 0(s0)
beqz s2, spurious_interrupt
sw zero, 0(s0)
.global rt_hw_context_switch_interrupt_do
rt_hw_context_switch_interrupt_do:
#ifdef RT_USING_SMART
// pick the destination for the frame copy:
// trap from S-mode (saved SPP set) -> original sp from sscratch,
// trap from U-mode -> top of the thread's kernel stack
csrr t0, sstatus
andi t0, t0, 0x100
beqz t0, __restore_sp_from_tcb_interrupt
#endif
__restore_sp_from_sscratch_interrupt:
csrr t0, sscratch
j __move_stack_context_interrupt
#ifdef RT_USING_SMART
__restore_sp_from_tcb_interrupt:
la s0, rt_interrupt_from_thread
LOAD a0, 0(s0)
jal rt_thread_sp_to_thread
jal get_thread_kernel_stack_top
mv t0, a0
#endif
__move_stack_context_interrupt:
// copy the CTX_REG_NR-word frame from the interrupt stack (t1)
// to the chosen thread stack (t0), 8 bytes at a time
mv t1, sp // src
mv sp, t0 // switch stack
addi sp, sp, -CTX_REG_NR * REGBYTES
// copy context
li s0, CTX_REG_NR // cnt
mv t2, sp // dst
copy_context_loop_interrupt:
LOAD t0, 0(t1)
STORE t0, 0(t2)
addi s0, s0, -1
addi t1, t1, 8
addi t2, t2, 8
bnez s0, copy_context_loop_interrupt
// store from-thread sp, load to-thread sp
la s0, rt_interrupt_from_thread
LOAD s1, 0(s0)
STORE sp, 0(s1)
la s0, rt_interrupt_to_thread
LOAD s1, 0(s0)
LOAD sp, 0(s1)
#ifdef RT_USING_SMART
mv a0, s1
jal rt_thread_sp_to_thread
jal lwp_aspace_switch
#endif
spurious_interrupt:
RESTORE_ALL
sret
/* rt_hw_interrupt_enable: restore previously saved sstatus bits */
.global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
csrs sstatus, a0 /* restore to old csr */
jr ra
/* rt_hw_interrupt_disable: clear SIE, return old sstatus in a0 */
.global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
csrrci a0, sstatus, 2 /* clear SIE */
jr ra
|
vandercookking/h7_device_RTT
| 1,586
|
rt-thread/libcpu/aarch64/common/vector_gcc.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
*/
.text
.globl system_vectors
.globl vector_exception
.globl vector_irq
.globl vector_fiq

/*
 * AArch64 EL1 exception vector table (2 KB aligned, 0x80 per entry).
 * Only the "CurrentEL with SP_ELx" and "lower EL, aarch64" groups are
 * handled; SP_EL0-stack and aarch32 sources deliberately route to
 * vector_serror as unsupported.
 */
system_vectors:
.align 11
.set VBAR, system_vectors
.org VBAR
/* Exception from CurrentEL (EL1) with SP_EL0 (SPSEL=0) -- unsupported */
.org (VBAR + 0x00 + 0)
B vector_serror /* Synchronous */
.org (VBAR + 0x80 + 0)
B vector_serror /* IRQ/vIRQ */
.org (VBAR + 0x100 + 0)
B vector_serror /* FIQ/vFIQ */
.org (VBAR + 0x180 + 0)
B vector_serror /* Error/vError */
/* Exception from CurrentEL (EL1) with SP_ELn */
.org (VBAR + 0x200 + 0)
B vector_exception /* Synchronous */
.org (VBAR + 0x280 + 0)
B vector_irq /* IRQ/vIRQ */
.org (VBAR + 0x300 + 0)
B vector_fiq /* FIQ/vFIQ */
.org (VBAR + 0x380 + 0)
B vector_serror
/* Exception from lower EL, aarch64 */
.org (VBAR + 0x400 + 0)
B vector_exception
.org (VBAR + 0x480 + 0)
B vector_irq
.org (VBAR + 0x500 + 0)
B vector_fiq
.org (VBAR + 0x580 + 0)
B vector_serror
/* Exception from lower EL, aarch32 -- unsupported */
.org (VBAR + 0x600 + 0)
B vector_serror
.org (VBAR + 0x680 + 0)
B vector_serror
.org (VBAR + 0x700 + 0)
B vector_serror
.org (VBAR + 0x780 + 0)
B vector_serror
|
vandercookking/h7_device_RTT
| 13,113
|
rt-thread/libcpu/aarch64/common/context_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 WangXiaoyao Support backtrace for user thread
*/
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
.text
/*
 * rt_hw_cpu_id_set -- cache this core's id in TPIDR_EL1.
 * Derives the id from MPIDR_EL1 affinity bits (Cortex-A55 packs it one
 * affinity level higher, hence the extra shift).
 */
.weak rt_hw_cpu_id_set
.type rt_hw_cpu_id_set, @function
rt_hw_cpu_id_set:
mrs x0, mpidr_el1 /* MPIDR_EL1: Multi-Processor Affinity Register */
#ifdef ARCH_ARM_CORTEX_A55
lsr x0, x0, #8
#endif
and x0, x0, #15
msr tpidr_el1, x0
ret
/*
int rt_hw_cpu_id(void) -- return the id cached by rt_hw_cpu_id_set.
*/
.weak rt_hw_cpu_id
.type rt_hw_cpu_id, @function
rt_hw_cpu_id:
mrs x0, tpidr_el1 /* id previously cached from MPIDR_EL1 */
ret
/*
void rt_hw_set_process_id(size_t id) -- publish id via CONTEXTIDR_EL1.
*/
.global rt_hw_set_process_id
rt_hw_set_process_id:
msr CONTEXTIDR_EL1, x0
ret
/*
 * Generic-timer (EL0 physical timer) accessors.
 * rt_hw_gtimer_enable: set CNTP_CTL_EL0.ENABLE.
 */
.globl rt_hw_gtimer_enable
rt_hw_gtimer_enable:
MOV X0,#1
MSR CNTP_CTL_EL0,X0
RET
/*
 * set gtimer CNTP_TVAL_EL0 value (ticks until next fire)
 */
.globl rt_hw_set_gtimer_val
rt_hw_set_gtimer_val:
MSR CNTP_TVAL_EL0,X0
RET
/*
 * get gtimer CNTP_TVAL_EL0 value
 */
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
MRS X0,CNTP_TVAL_EL0
RET
/* get the physical counter value */
.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
MRS X0, CNTPCT_EL0
RET
/*
 * get gtimer frequency (CNTFRQ_EL0, Hz)
 */
.globl rt_hw_get_gtimer_frq
rt_hw_get_gtimer_frq:
MRS X0,CNTFRQ_EL0
RET
/*
 * _thread_start -- first-run trampoline for a new thread.
 * x19 = thread entry, x20 = thread exit handler (set up when the
 * initial stack frame was built elsewhere in the port).
 */
START_POINT(_thread_start)
blr x19
mov x29, #0
blr x20
b . /* never here */
START_POINT_END(_thread_start)
/*
 * SAVE_CONTEXT -- push the full exception frame on SP:
 * FPU regs, x0-x29, FPCR/FPSR, SP_EL0+LR, then SPSR_EL1+ELR_EL1 taken
 * from the exception state.  Leaves the frame pointer in X0.
 */
.macro SAVE_CONTEXT
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [SP, #-0x10]!
MOV X0, SP /* Move SP into X0 for saving. */
.endm
/*
 * SAVE_CONTEXT_FROM_EL1 -- same layout, but built for a synchronous
 * switch out of EL1: the "SPSR" slot is synthesized (EL1h, DAIF
 * masked) and the "ELR" slot is the caller's LR, so restoring resumes
 * right after the call.
 */
.macro SAVE_CONTEXT_FROM_EL1
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MOV X19, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
MOV X18, X30
STP X18, X19, [SP, #-0x10]!
.endm
#ifdef RT_USING_SMP
/*
 * RESTORE_CONTEXT (SMP) -- unwind the frame built by SAVE_CONTEXT*.
 * X0 = frame pointer (thread sp).  The TST on the saved SPSR mode
 * bits feeds the BEQ below: mode EL0t (all zero) means return to user.
 */
.macro RESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_LWP
/* flags still hold the TST result: EL0 target -> user return path */
BEQ arch_ret_to_user
#endif
ERET
.endm
#else
/*
 * RESTORE_CONTEXT (UP) -- as above, but also switches the address
 * space and restores lwp user settings for the incoming thread first.
 */
.macro RESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
#ifdef RT_USING_LWP
BL rt_thread_self
MOV X19, X0
BL lwp_aspace_switch
MOV X0, X19
BL lwp_user_setting_restore
#endif
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_LWP
BEQ arch_ret_to_user
#endif
ERET
.endm
#endif
/*
 * RESTORE_CONTEXT_WITHOUT_MMU_SWITCH -- unwind without touching the
 * address space; used when resuming the same thread that trapped.
 */
.macro RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
/* the SP is already ok */
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_LWP
BEQ arch_ret_to_user
#endif
ERET
.endm
/*
 * SAVE_USER_CTX -- if the trap came from EL0 (SPSR mode bits zero),
 * save lwp user-thread context.  Reloads X0/X1 afterwards since the
 * call clobbers them.
 */
.macro SAVE_USER_CTX
MRS X1, SPSR_EL1
AND X1, X1, 0xf
CMP X1, XZR
BNE 1f
BL lwp_uthread_ctx_save
LDP X0, X1, [SP]
1:
.endm
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif
.text
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the previous DAIF mask and masks IRQ+FIQ (DAIFSet #3).
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS X0, DAIF
MSR DAIFSet, #3
DSB SY
RET
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores only the I and F bits (0xc0) from a saved DAIF value,
 * preserving the current A and D bits.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
DSB SY
AND X0, X0, #0xc0
MRS X1, DAIF
BIC X1, X1, #0xc0
ORR X0, X0, X1
MSR DAIF, X0
RET
.text
#ifdef RT_USING_SMP
/*
 * void rt_hw_context_switch_to(rt_uint3 to, struct rt_thread *to_thread);
 * X0 --> to (thread stack)
 * X1 --> to_thread
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR X0, [X0]
MOV SP, X0
MOV X0, X1
BL rt_cpus_lock_status_restore
#ifdef RT_USING_LWP
BL rt_thread_self
BL lwp_user_setting_restore
#endif
B rt_hw_context_switch_exit
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32
to, struct rt_thread *to_thread);
 * X0 --> from (from_thread stack)
 * X1 --> to (to_thread stack)
 * X2 --> to_thread
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
SAVE_CONTEXT_FROM_EL1
MOV X3, SP
STR X3, [X0] // store sp in preempted tasks TCB
LDR X0, [X1] // get new task stack pointer
MOV SP, X0
MOV X0, X2
BL rt_cpus_lock_status_restore
#ifdef RT_USING_LWP
BL rt_thread_self
BL lwp_user_setting_restore
#endif
B rt_hw_context_switch_exit
/*
 * void rt_hw_context_switch_interrupt(context, from sp, to sp, tp tcb)
 * X0 :interrupt context
 * X1 :addr of from_thread's sp
 * X2 :addr of to_thread's sp
 * X3 :to_thread's tcb
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
/* preserve X0-X3/X29/X30 across the lwp save call */
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X29, X30, [SP, #-0x10]!
#ifdef RT_USING_LWP
BL rt_thread_self
BL lwp_user_setting_save
#endif
LDP X29, X30, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
STR X0, [X1] /* store interrupt context (frame sp) in from TCB */
LDR X0, [X2] /* load to-thread sp */
MOV SP, X0
MOV X0, X3
MOV X19, X0
BL rt_cpus_lock_status_restore
MOV X0, X19
#ifdef RT_USING_LWP
BL lwp_user_setting_restore
#endif
B rt_hw_context_switch_exit
/* FIQ not used in SMP build: hang for diagnosis */
.globl vector_fiq
vector_fiq:
B .
.globl vector_irq
vector_irq:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]! /* X0 is thread sp */
BL rt_interrupt_enter
LDP X0, X1, [SP]
#ifdef RT_USING_LWP
SAVE_USER_CTX
#endif
BL rt_hw_trap_irq
#ifdef RT_USING_LWP
BL lwp_uthread_ctx_restore
#endif
BL rt_interrupt_leave
LDP X0, X1, [SP], #0x10
BL rt_scheduler_do_irq_switch
B rt_hw_context_switch_exit
/* common exit: X0 = frame sp of the thread to resume */
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
CLREX
MOV X0, SP
RESTORE_CONTEXT
#else /* RT_USING_SMP */
/*
 * void rt_hw_context_switch_to(rt_ubase_t to);
 * X0 --> to sp
 * First switch into a thread: no "from" context to save.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
CLREX                           /* clear exclusive monitor before changing context */
LDR X0, [X0]                    /* X0 = *to : the new thread's saved sp */
RESTORE_CONTEXT
/*
 * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
 * X0 --> from sp
 * X1 --> to sp
 * X2 --> to thread
 * Synchronous (thread-level) switch: save current context, swap stacks, restore.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
CLREX
SAVE_CONTEXT_FROM_EL1
MOV X2, SP
STR X2, [X0] // store sp in preempted tasks TCB
LDR X0, [X1] // get new task stack pointer
RESTORE_CONTEXT
/*
 * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
 * Deferred switch: only record from/to and raise a flag; the actual switch is
 * performed by vector_irq on the way out of the interrupt.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
CLREX
LDR X6, =rt_thread_switch_interrupt_flag
LDR X7, [X6]
CMP X7, #1
B.EQ _reswitch                  /* a switch is already pending: keep original "from" */
LDR X4, =rt_interrupt_from_thread // set rt_interrupt_from_thread
STR X0, [X4]
MOV X7, #1 // set rt_thread_switch_interrupt_flag to 1
STR X7, [X6]
STP X1, X30, [SP, #-0x10]!      /* X1/LR are clobbered by the call below */
#ifdef RT_USING_LWP
MOV X0, X2
BL lwp_user_setting_save
#endif
LDP X1, X30, [SP], #0x10
_reswitch:
LDR X6, =rt_interrupt_to_thread // set rt_interrupt_to_thread
STR X1, [X6]
RET
.text
// -- Exception handlers ----------------------------------
/* UP build FIQ vector: save context, call the C handler, restore. */
.align 8
.globl vector_fiq
vector_fiq:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]!
BL rt_hw_trap_fiq
LDP X0, X1, [SP], #0x10
RESTORE_CONTEXT
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
// -------------------------------------------------------------------
/*
 * UP build IRQ vector: dispatch the interrupt, then honour a pending
 * rt_hw_context_switch_interrupt() request (flag + from/to globals)
 * before returning to thread context.
 */
.align 8
.globl vector_irq
vector_irq:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]! /* X0 is thread sp */
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
LDP X0, X1, [SP], #0x10
// if rt_thread_switch_interrupt_flag set, jump to
// rt_hw_context_switch_interrupt_do and don't return
LDR X1, =rt_thread_switch_interrupt_flag
LDR X2, [X1]
CMP X2, #1
B.NE vector_irq_exit
MOV X2, #0 // clear flag
STR X2, [X1]
LDR X3, =rt_interrupt_from_thread
LDR X4, [X3]
STR x0, [X4] // store sp in preempted tasks's TCB
LDR x3, =rt_interrupt_to_thread
LDR X4, [X3]
LDR x0, [X4] // get new task's stack pointer
RESTORE_CONTEXT
vector_irq_exit:
/* no switch pending: return to the interrupted thread on its own stack */
MOV SP, X0
RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
#endif /* RT_USING_SMP */
// -------------------------------------------------
/*
 * Synchronous-exception vector: save context, run the C trap handler,
 * then restore the (possibly signal-modified) context without switching
 * the MMU context.
 */
START_POINT(vector_exception)
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]!       /* X0 = thread sp saved by SAVE_CONTEXT */
#ifdef RT_USING_LWP
SAVE_USER_CTX
#endif
BL rt_hw_trap_exception
#ifdef RT_USING_LWP
BL lwp_uthread_ctx_restore
#endif
LDP X0, X1, [SP], #0x10
MOV SP, X0
RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
START_POINT_END(vector_exception)
/*
 * SError (asynchronous external abort) vector: save the context for the
 * trap handler's benefit, report via rt_hw_trap_serror(), then hang —
 * SError is treated as unrecoverable here.
 *
 * Fix: the closing macro previously read START_POINT_END(vector_exception),
 * mismatching the START_POINT(vector_serror) that opens this block; the
 * begin/end macro pair must name the same symbol.
 */
START_POINT(vector_serror)
SAVE_CONTEXT
#ifdef RT_USING_LWP
SAVE_USER_CTX
#endif
STP X0, X1, [SP, #-0x10]!       /* X0 = thread sp for the handler */
BL rt_hw_trap_serror
b .                             /* unrecoverable: park the core */
START_POINT_END(vector_serror)
|
vandercookking/h7_device_RTT
| 1,295
|
rt-thread/libcpu/aarch64/common/cpu_gcc.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
*/
.text
/*
 * int rt_hw_get_current_el(void)
 * Returns the current exception level (0..3) decoded from CurrentEL
 * (CurrentEL holds EL in bits [3:2], hence the 0x4/0x8/0xc compares).
 */
.globl rt_hw_get_current_el
rt_hw_get_current_el:
MRS X0, CurrentEL
CMP X0, 0xc
B.EQ 3f
CMP X0, 0x8
B.EQ 2f
CMP X0, 0x4
B.EQ 1f
LDR X0, =0
B 0f
3:
LDR X0, =3
B 0f
2:
LDR X0, =2
B 0f
1:
LDR X0, =1
B 0f
0:
RET
/*
 * void rt_hw_set_current_vbar(rt_ubase_t addr)
 * Installs X0 as the vector base (VBAR_ELx) of whichever EL we run at.
 */
.globl rt_hw_set_current_vbar
rt_hw_set_current_vbar:
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
3:
MSR VBAR_EL3,X0
B 0f
2:
MSR VBAR_EL2,X0
B 0f
1:
MSR VBAR_EL1,X0
B 0f
0:
RET
/*
 * void rt_hw_set_elx_env(void)
 * EL3: route external aborts/IRQ/FIQ and set NS (SCR_EL3 |= 0xF).
 * EL2: route aborts/IRQ/FIQ to EL2 (HCR_EL2 |= 0x38). EL1: nothing.
 */
.globl rt_hw_set_elx_env
rt_hw_set_elx_env:
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
3:
MRS X0, SCR_EL3
ORR X0, X0, #0xF /* SCR_EL3.NS|IRQ|FIQ|EA */
MSR SCR_EL3, X0
B 0f
2:
MRS X0, HCR_EL2
ORR X0, X0, #0x38
MSR HCR_EL2, X0
B 0f
1:
B 0f
0:
RET
/* Install the EL1 vector table base directly (no EL probing). */
.global rt_cpu_vector_set_base
rt_cpu_vector_set_base:
MSR VBAR_EL1,X0
RET
/**
 * unsigned long rt_hw_ffz(unsigned long x)
 * Computes 63 - clz(~x), i.e. the bit index of the most significant
 * zero bit of x (returns 63 when x == 0).
 * NOTE(review): "ffz" often means the *lowest* zero bit; callers here
 * apparently rely on the MSB-side variant — confirm before reuse.
 */
.global rt_hw_ffz
rt_hw_ffz:
mvn x1, x0
clz x0, x1
mov x1, #0x3f
sub x0, x1, x0
ret
/* unsigned long rt_hw_clz(unsigned long x): count leading zeros of x. */
.global rt_hw_clz
rt_hw_clz:
clz x0, x0
ret
|
vandercookking/h7_device_RTT
| 5,180
|
rt-thread/libcpu/aarch64/common/cache.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-03-17 bigmagic first version
*/
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.globl __asm_dcache_level
__asm_dcache_level:
lsl x12, x0, #1
msr csselr_el1, x12 /* select cache level */
isb /* sync change of cssidr_el1 */
mrs x6, ccsidr_el1 /* read the new cssidr_el1 */
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
add x2, x2, #4 /* x2 <- log2(cache line size) */
mov x3, #0x3ff
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
clz w5, w3 /* bit position of #ways */
mov x4, #0x7fff
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
/* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
/* x4 <- number of cache sets - 1 */
/* x5 <- bit position of #ways */
/* iterate every set, and within each set every way, issuing DC ISW/CISW */
loop_set:
mov x6, x3 /* x6 <- working copy of #ways */
loop_way:
lsl x7, x6, x5
orr x9, x12, x7 /* map way and level to cisw value */
lsl x7, x4, x2
orr x9, x9, x7 /* map set number to cisw value */
tbz w1, #0, 1f
dc isw, x9 /* invalidate only (x1 == 1) */
b 2f
1: dc cisw, x9 /* clean & invalidate by set/way */
2: subs x6, x6, #1 /* decrement the way */
b.ge loop_way
subs x4, x4, #1 /* decrement the set */
b.ge loop_set
ret
/*
 * void __asm_flush_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 * Walks CLIDR_EL1 from level 0 up to LoC, calling __asm_dcache_level
 * for each level that holds data (cache type >= 2).
 */
.globl __asm_dcache_all
__asm_dcache_all:
mov x1, x0
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
lsr x11, x10, #24
and x11, x11, #0x7 /* x11 <- loc */
cbz x11, finished /* if loc is 0, exit */
mov x15, lr /* manual LR save: __asm_dcache_level clobbers no stack */
mov x0, #0 /* start flush at cache level 0 */
/* x0 <- cache level */
/* x10 <- clidr_el1 */
/* x11 <- loc */
/* x15 <- return address */
loop_level:
lsl x12, x0, #1
add x12, x12, x0 /* x0 <- tripled cache level */
lsr x12, x10, x12
and x12, x12, #7 /* x12 <- cache type */
cmp x12, #2
b.lt skip /* skip if no cache or icache */
bl __asm_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0
b.gt loop_level
mov x0, #0
msr csselr_el1, x0 /* restore csselr_el1 */
dsb sy
isb
mov lr, x15
finished:
ret
/* Clean & invalidate the entire data cache hierarchy. */
.globl __asm_flush_dcache_all
__asm_flush_dcache_all:
mov x0, #0
b __asm_dcache_all
/* Invalidate (without cleaning) the entire data cache hierarchy. */
.globl __asm_invalidate_dcache_all
__asm_invalidate_dcache_all:
mov x0, #0x1
b __asm_dcache_all
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.globl __asm_flush_dcache_range
__asm_flush_dcache_range:
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf /* CTR_EL0.DminLine: log2(words per d-line) */
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3 /* round start down to a line boundary */
1: dc civac, x0 /* clean & invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/* void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.globl __asm_invalidate_dcache_range
__asm_invalidate_dcache_range:
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc ivac, x0 /* invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/* void __asm_invalidate_icache_range(start, end)
 *
 * invalidate icache in the range
 *
 * x0: start address
 * x1: end address
 * (icache line size comes from CTR_EL0.IminLine, bits [3:0] — no shift)
 */
.globl __asm_invalidate_icache_range
__asm_invalidate_icache_range:
mrs x3, ctr_el0
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: ic ivau, x0 /* invalidate instruction or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries (Inner Shareable, to PoU).
 */
.globl __asm_invalidate_icache_all
__asm_invalidate_icache_all:
dsb sy
ic ialluis
isb sy
ret
/* Platform hook: no L3 system cache here, report success. */
.globl __asm_flush_l3_cache
__asm_flush_l3_cache:
mov x0, #0 /* return status as success */
ret
|
vandercookking/h7_device_RTT
| 10,309
|
rt-thread/libcpu/aarch64/cortex-a/entry_point.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2020-01-15 bigmagic the first version
* 2020-08-10 SummerGift support clang compiler
* 2023-04-29 GuEe-GUI support kernel's ARM64 boot header
*/
#include "rtconfig.h"
.section ".text.entrypoint","ax"
#ifdef RT_USING_OFW
/*
 * Our goal is to boot the rt-thread as possible without modifying the
 * bootloader's config, so we use the kernel's boot header for ARM64:
 * https://www.kernel.org/doc/html/latest/arm64/booting.html#call-the-kernel-image
 */
_head:
b _start /* Executable code */
.long 0 /* Executable code: pads the 8-byte code0/code1 slot after the branch */
.quad _text_offset /* Image load offset from start of RAM, little endian */
.quad _end - _head /* Effective Image size, little endian (_end defined in link.lds) */
.quad 0xa /* Kernel flags, little endian */
.quad 0 /* Reserved */
.quad 0 /* Reserved */
.quad 0 /* Reserved */
.ascii "ARM\x64" /* Magic number */
.long 0 /* Reserved (used for PE COFF offset) */
#endif
/* Variable registers: x21~x28 — symbolic names for values that must survive early boot calls */
dtb_paddr .req x21
boot_arg0 .req x22
boot_arg1 .req x23
boot_arg2 .req x24
stack_top .req x25
/*
 * Boot entry point. Parks secondary cores, drops from EL3/EL2 to EL1,
 * clears .bss, sets up early page tables, enables the MMU, then jumps
 * to rtthread_startup() on the boot stack. Does not return.
 */
.global _start
_start:
/*
 * Boot CPU general-purpose register settings:
 * x0 = physical address of device tree blob (dtb) in system RAM.
 * x1 = 0 (reserved for future use)
 * x2 = 0 (reserved for future use)
 * x3 = 0 (reserved for future use)
 */
mov dtb_paddr, x0
mov boot_arg0, x1
mov boot_arg1, x2
mov boot_arg2, x3
#ifdef ARCH_ARM_BOOTWITH_FLUSH_CACHE
bl __asm_flush_dcache_all
#endif
bl rt_hw_cpu_id_set
/* read cpu id, stop slave cores */
mrs x0, tpidr_el1
cbz x0, .L__cpu_0 /* .L prefix is the local label in ELF */
#ifndef RT_AMP_SLAVE
/* cpu id > 0, stop */
/* cpu id == 0 will also goto here after returned from entry() if possible */
.L__current_cpu_idle:
wfe
b .L__current_cpu_idle
#endif
.L__cpu_0:
/* set stack before our code, Define stack pointer for current exception level */
adr x1, .el_stack_top
/* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
and x0, x0, #12 /* clear reserved bits */
/* running at EL3? */
cmp x0, #12 /* 1100b. So, EL3 */
bne .L__not_in_el3 /* 11? !EL3 -> 5: */
/* should never be executed, just for completeness. (EL3) */
mov x2, #0x5b1
msr scr_el3, x2 /* SCR_ELn Secure Configuration Register */
mov x2, #0x3c9
msr spsr_el3, x2 /* SPSR_ELn. Saved Program Status Register. 1111001001 */
adr x2, .L__not_in_el3
msr elr_el3, x2
eret /* Exception Return: from EL3, continue from .L__not_in_el3 */
.L__not_in_el3: /* running at EL2 or EL1 */
cmp x0, #4 /* 0x04 0100 EL1 */
beq .L__in_el1 /* EL1 -> 5: */
mrs x0, hcr_el2
bic x0, x0, #0xff
msr hcr_el2, x0
msr sp_el1, x1 /* in EL2, set sp of EL1 to _start */
/* enable CNTP for EL1 */
mrs x0, cnthctl_el2 /* Counter-timer Hypervisor Control register */
orr x0, x0, #3
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
/* enable AArch64 in EL1 */
mov x0, #(1 << 31) /* AArch64 */
orr x0, x0, #(1 << 1) /* SWIO hardwired on Pi3 */
msr hcr_el2, x0
mrs x0, hcr_el2
/* change execution level to EL1 */
mov x2, #0x3c4
msr spsr_el2, x2 /* 1111000100 */
adr x2, .L__in_el1
msr elr_el2, x2
eret /* exception return. from EL2. continue from .L__in_el1 */
/* GET_PHY: load the physical (link-time) address of a symbol, PC-relative */
.macro GET_PHY reg, symbol
adrp \reg, \symbol
add \reg, \reg, #:lo12:\symbol
.endm
.L__in_el1:
mov sp, x1 /* in EL1. Set sp to _start */
/* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
mov x1, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
msr cpacr_el1, x1
/* applying context change */
dsb ish
isb
/* clear bss: word loop for the 8-byte-aligned bulk, byte loop for the tail */
GET_PHY x1, __bss_start
GET_PHY x2, __bss_end
sub x2, x2, x1 /* get bss size */
and x3, x2, #7 /* x3 is < 7 */
ldr x4, =~0x7
and x2, x2, x4 /* mask ~7 */
.L__clean_bss_loop:
cbz x2, .L__clean_bss_loop_1
str xzr, [x1], #8
sub x2, x2, #8
b .L__clean_bss_loop
.L__clean_bss_loop_1:
cbz x3, .L__jump_to_entry
strb wzr, [x1], #1
sub x3, x3, #1
b .L__clean_bss_loop_1
.L__jump_to_entry: /* jump to C code, should not return */
bl mmu_tcr_init
bl get_ttbrn_base
add x1, x0, #0x1000
msr ttbr0_el1, x0
msr ttbr1_el1, x1
dsb sy
#ifdef RT_USING_SMART
/* x3 = physical-to-virtual offset of the kernel image */
ldr x2, =_start
GET_PHY x3, _start
sub x3, x3, x2
#else
mov x3,0
#endif
ldr x2, =0x10000000 /* map 256M memory for kernel space */
bl rt_hw_mem_setup_early
ldr x30, =after_mmu_enable /* set LR to after_mmu_enable function, it's a v_addr */
mrs x1, sctlr_el1
bic x1, x1, #(3 << 3) /* dis SA, SA0 */
bic x1, x1, #(1 << 1) /* dis A */
orr x1, x1, #(1 << 12) /* I */
orr x1, x1, #(1 << 2) /* C */
orr x1, x1, #(1 << 0) /* M */
msr sctlr_el1, x1 /* enable MMU */
dsb ish
isb
ic ialluis /* Invalidate all instruction caches in Inner Shareable domain to Point of Unification */
dsb ish
isb
tlbi vmalle1 /* Invalidate all stage 1 translations used at EL1 with the current VMID */
dsb ish
isb
ret /* "returns" to x30 = after_mmu_enable, now executing at virtual addresses */
after_mmu_enable:
#ifdef RT_USING_SMART
mrs x0, tcr_el1 /* disable ttbr0, only using kernel space */
orr x0, x0, #(1 << 7)
msr tcr_el1, x0
msr ttbr0_el1, xzr
dsb sy
#endif
mov x0, #1
msr spsel, x0 /* use SP_EL1 from here on */
adr x1, .el_stack_top
mov sp, x1 /* sp_el1 set to _start */
b rtthread_startup
#ifdef RT_USING_SMP
/**
 * secondary cpu
 * Entry for non-boot cores: same EL demotion / MMU bring-up as _start,
 * but skips .bss clearing and early page-table creation (done by CPU 0)
 * and carves out a per-CPU 8 KiB slice of the shared boot stack.
 */
.global _secondary_cpu_entry
_secondary_cpu_entry:
bl rt_hw_cpu_id_set
adr x1, .el_stack_top
/* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
and x0, x0, #12 /* clear reserved bits */
/* running at EL3? */
cmp x0, #12 /* 1100b. So, EL3 */
bne .L__not_in_el3_cpux /* 11? !EL3 -> 5: */
/* should never be executed, just for completeness. (EL3) */
mov x2, #0x5b1
msr scr_el3, x2 /* SCR_ELn Secure Configuration Register */
mov x2, #0x3c9
msr spsr_el3, x2 /* SPSR_ELn. Saved Program Status Register. 1111001001 */
adr x2, .L__not_in_el3_cpux
msr elr_el3, x2
eret /* Exception Return: from EL3, continue from .L__not_in_el3 */
.L__not_in_el3_cpux: /* running at EL2 or EL1 */
cmp x0, #4 /* 0x04 0100 EL1 */
beq .L__in_el1_cpux /* EL1 -> 5: */
mrs x0, hcr_el2
bic x0, x0, #0xff
msr hcr_el2, x0
msr sp_el1, x1 /* in EL2, set sp of EL1 to _start */
/* enable CNTP for EL1 */
mrs x0, cnthctl_el2 /* Counter-timer Hypervisor Control register */
orr x0, x0, #3
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
/* enable AArch64 in EL1 */
mov x0, #(1 << 31) /* AArch64 */
orr x0, x0, #(1 << 1) /* SWIO hardwired on Pi3 */
msr hcr_el2, x0
mrs x0, hcr_el2
/* change execution level to EL1 */
mov x2, #0x3c4
msr spsr_el2, x2 /* 1111000100 */
adr x2, .L__in_el1_cpux
msr elr_el2, x2
eret /* exception return. from EL2. continue from .L__in_el1 */
.L__in_el1_cpux:
mrs x0, tpidr_el1 /* cpu id, set earlier by rt_hw_cpu_id_set */
/* each cpu init stack is 8k */
sub x1, x1, x0, lsl #13
mov sp, x1 /* in EL1. Set sp to _start */
/* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
mov x1, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
msr cpacr_el1, x1
.L__jump_to_entry_cpux: /* jump to C code, should not return */
/* init mmu early */
bl mmu_tcr_init
bl get_ttbrn_base
add x1, x0, #0x1000
msr ttbr0_el1, x0
msr ttbr1_el1, x1
dsb sy
ldr x30, =after_mmu_enable_cpux /* set LR to after_mmu_enable function, it's a v_addr */
mrs x1, sctlr_el1
bic x1, x1, #(3 << 3) /* dis SA, SA0 */
bic x1, x1, #(1 << 1) /* dis A */
orr x1, x1, #(1 << 12) /* I */
orr x1, x1, #(1 << 2) /* C */
orr x1, x1, #(1 << 0) /* M */
msr sctlr_el1, x1 /* enable MMU */
dsb sy
isb sy
ic ialluis /* Invalidate all instruction caches in Inner Shareable domain to Point of Unification */
dsb sy
isb sy
tlbi vmalle1 /* Invalidate all stage 1 translations used at EL1 with the current VMID */
dsb sy
isb sy
ret /* "returns" to x30 = after_mmu_enable_cpux at virtual addresses */
after_mmu_enable_cpux:
#ifdef RT_USING_SMART
mrs x0, tcr_el1 /* disable ttbr0, only using kernel space */
orr x0, x0, #(1 << 7)
msr tcr_el1, x0
msr ttbr0_el1, xzr
dsb sy
#endif
mov x0, #1
msr spsel, x0
mrs x0, tpidr_el1
/* each cpu init stack is 8k */
adr x1, .el_stack_top
sub x1, x1, x0, lsl #13
mov sp, x1 /* in EL1. Set sp to _start */
b rt_hw_secondary_cpu_bsp_start
#endif
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif
/* Shared boot stack area: 8 KiB per CPU, grows downward from .el_stack_top */
.align 12
.el_stack:
.space (8192 * RT_CPUS_NR)
.el_stack_top:
|
vandercookking/h7_device_RTT
| 8,114
|
rt-thread/libcpu/arc/em/contex_gcc_mw.S
|
/*
* Copyright (c) 2018, Synopsys, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#define __ASSEMBLY__
#include "include/arc/arc.h"
#include "include/arc/arc_asm_common.h"
.global rt_interrupt_enter;
.global rt_interrupt_leave;
.global rt_thread_switch_interrupt_flag;
.global rt_interrupt_from_thread;
.global rt_interrupt_to_thread;
.global exc_nest_count;
.global set_hw_stack_check;
.text
/*
 * Common switch tail: r0 = &from->sp, r1 = &to->sp.
 * Stores the outgoing sp, loads the incoming one, optionally reprograms
 * the hardware stack-check bounds for the new thread, then jumps to the
 * resume address found on top of the new stack.
 */
.align 4
dispatcher:
st sp, [r0]
ld sp, [r1]
#if ARC_FEATURE_STACK_CHECK
/* stack checking must be off while its bounds registers are updated */
#if ARC_FEATURE_SEC_PRESENT
lr r0, [AUX_SEC_STAT]
bclr r0, r0, AUX_SEC_STAT_BIT_SSC
sflag r0
#else
lr r0, [AUX_STATUS32]
bclr r0, r0, AUX_STATUS_BIT_SC
kflag r0
#endif
jl set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
lr r0, [AUX_SEC_STAT]
bset r0, r0, AUX_SEC_STAT_BIT_SSC
sflag r0
#else
lr r0, [AUX_STATUS32]
bset r0, r0, AUX_STATUS_BIT_SC
kflag r0
#endif
#endif
pop r0
j [r0] /* jump to the resume routine pushed by the saver (e.g. dispatch_r) */
/* return routine when task dispatch happened in task context */
dispatch_r:
RESTORE_NONSCRATCH_REGS
RESTORE_R0_TO_R12
j [blink]
/*
 * rt_base_t rt_hw_interrupt_disable();
 * clri returns the previous interrupt-enable state in r0.
 */
.global rt_hw_interrupt_disable
.align 4
rt_hw_interrupt_disable:
clri r0
j [blink]
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * seti restores the state previously returned by clri.
 */
.global rt_hw_interrupt_enable
.align 4
rt_hw_interrupt_enable:
seti r0
j [blink]
/*
 * Deferred switch from interrupt context: record from/to and raise the
 * flag; the interrupt/exception epilogue performs the actual dispatch.
 * r0 --> from, r1 --> to
 */
.global rt_hw_context_switch_interrupt
.align 4
rt_hw_context_switch_interrupt:
ld r2, [rt_thread_switch_interrupt_flag]
breq r2, 1, _reswitch /* Check the flag, if it is 1, skip to reswitch */
mov r2, 1
st r2, [rt_thread_switch_interrupt_flag]
st r0, [rt_interrupt_from_thread]
_reswitch:
st r1, [rt_interrupt_to_thread]
j [blink]
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Task-context switch: save registers, push dispatch_r as resume address.
 */
.global rt_hw_context_switch
.align 4
rt_hw_context_switch:
SAVE_R0_TO_R12
SAVE_NONSCRATCH_REGS
mov r2, dispatch_r
push r2
b dispatcher
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 * First switch into a thread: nothing to save.
 */
.global rt_hw_context_switch_to
.align 4
rt_hw_context_switch_to:
ld sp, [r0]
#if ARC_FEATURE_STACK_CHECK
mov r1, r0
#if ARC_FEATURE_SEC_PRESENT
lr r0, [AUX_SEC_STAT]
bclr r0, r0, AUX_SEC_STAT_BIT_SSC
sflag r0
#else
lr r0, [AUX_STATUS32]
bclr r0, r0, AUX_STATUS_BIT_SC
kflag r0
#endif
jl set_hw_stack_check
#if ARC_FEATURE_SEC_PRESENT
lr r0, [AUX_SEC_STAT]
bset r0, r0, AUX_SEC_STAT_BIT_SSC
sflag r0
#else
lr r0, [AUX_STATUS32]
bset r0, r0, AUX_STATUS_BIT_SC
kflag r0
#endif
#endif
pop r0
j [r0]
/*
 * First-run resume routine: pops blink/entry/status/arg laid out by the
 * stack-init code, sets STATUS32, and jumps to the thread entry
 * (delay-slot form: kflag executes before the jump takes effect).
 */
.global start_r
.align 4
start_r:
pop blink;
pop r1
pop r2
pop r0
j_s.d [r1]
kflag r2
/*
 * int __rt_ffs(int value);
 * r0 --> value
 * Returns 1-based index of the lowest set bit; 0 when value == 0.
 */
.global __rt_ffs
.align 4
__rt_ffs:
breq r0, 0, __rt_ffs_return
ffs r1, r0
add r0, r1, 1
__rt_ffs_return:
j [blink]
/****** exceptions and interrupts handling ******/
/****** entry for exception handling ******/
/*
 * CPU exception entry: saves the exception frame, switches to the
 * dedicated exception stack when taken from task context, dispatches
 * via exc_int_handler_table[ECR vector], then either returns in place
 * or performs a pending thread dispatch.
 */
.global exc_entry_cpu
.align 4
exc_entry_cpu:
EXCEPTION_PROLOGUE
mov blink, sp
mov r3, sp /* as exception handler's para(p_excinfo) */
ld r0, [exc_nest_count]
add r1, r0, 1
st r1, [exc_nest_count]
brne r0, 0, exc_handler_1
/* change to exception stack if interrupt happened in task context */
mov sp, _e_stack
exc_handler_1:
PUSH blink
lr r0, [AUX_ECR]
lsr r0, r0, 16 /* ECR[23:16] = vector number, used as table index */
mov r1, exc_int_handler_table
ld.as r2, [r1, r0]
mov r0, r3
jl [r2]
/* interrupts are not allowed */
ret_exc:
POP sp /* back to the pre-exception stack saved in blink */
mov r1, exc_nest_count
ld r0, [r1]
sub r0, r0, 1
st r0, [r1]
brne r0, 0, ret_exc_1 /* nest exception case */
lr r1, [AUX_IRQ_ACT] /* nest interrupt case */
brne r1, 0, ret_exc_1
ld r0, [rt_thread_switch_interrupt_flag]
brne r0, 0, ret_exc_2
ret_exc_1: /* return from non-task context, interrupts or exceptions are nested */
EXCEPTION_EPILOGUE
rtie
/* there is a dispatch request */
ret_exc_2:
/* clear dispatch request */
mov r0, 0
st r0, [rt_thread_switch_interrupt_flag]
SAVE_CALLEE_REGS /* save callee save registers */
/* clear exception bit to do exception exit by SW */
lr r0, [AUX_STATUS32]
bclr r0, r0, AUX_STATUS_BIT_AE
kflag r0
mov r1, ret_exc_r /* save return address */
PUSH r1
ld r0, [rt_interrupt_from_thread]
ld r1, [rt_interrupt_to_thread]
b dispatcher
ret_exc_r:
/* recover exception status */
lr r0, [AUX_STATUS32]
bset r0, r0, AUX_STATUS_BIT_AE
kflag r0
RESTORE_CALLEE_REGS
EXCEPTION_EPILOGUE
rtie
/****** entry for normal interrupt exception handling ******/
/*
 * Normal (non-fast) interrupt entry: maintains exc_nest_count, switches
 * to the exception stack when arriving from task context, brackets the
 * handler with rt_interrupt_enter/leave, and performs a deferred thread
 * dispatch on exit when rt_thread_switch_interrupt_flag is set.
 */
.global exc_entry_int /* entry for interrupt handling */
.align 4
exc_entry_int:
#if ARC_FEATURE_FIRQ == 1
/* check whether it is P0 interrupt */
#if ARC_FEATURE_RGF_NUM_BANKS > 1
lr r0, [AUX_IRQ_ACT]
btst r0, 0
jnz exc_entry_firq
#else
PUSH r10
lr r10, [AUX_IRQ_ACT]
btst r10, 0
POP r10
jnz exc_entry_firq
#endif
#endif
INTERRUPT_PROLOGUE
mov blink, sp
clri /* disable interrupt */
ld r3, [exc_nest_count]
add r2, r3, 1
st r2, [exc_nest_count]
seti /* enable higher priority interrupt */
brne r3, 0, irq_handler_1
/* change to exception stack if interrupt happened in task context */
mov sp, _e_stack
#if ARC_FEATURE_STACK_CHECK
/* stack checking is meaningless on the shared exception stack: turn it off */
#if ARC_FEATURE_SEC_PRESENT
lr r0, [AUX_SEC_STAT]
bclr r0, r0, AUX_SEC_STAT_BIT_SSC
sflag r0
#else
lr r0, [AUX_STATUS32]
bclr r0, r0, AUX_STATUS_BIT_SC
kflag r0
#endif
#endif
irq_handler_1:
PUSH blink
jl rt_interrupt_enter
lr r0, [AUX_IRQ_CAUSE]
sr r0, [AUX_IRQ_SELECT]
mov r1, exc_int_handler_table
ld.as r2, [r1, r0] /* r2 = exc_int_handler_table + irqno *4 */
/* handle software triggered interrupt */
lr r3, [AUX_IRQ_HINT]
cmp r3, r0
bne.d irq_hint_handled
xor r3, r3, r3
sr r3, [AUX_IRQ_HINT] /* ack the software interrupt */
irq_hint_handled:
lr r3, [AUX_IRQ_PRIORITY]
PUSH r3 /* save irq priority */
jl [r2] /* jump to interrupt handler */
jl rt_interrupt_leave
ret_int:
clri /* disable interrupt */
POP r3 /* irq priority */
POP sp
mov r1, exc_nest_count
ld r0, [r1]
sub r0, r0, 1
st r0, [r1]
/* if there are multi-bits set in IRQ_ACT, it's still in nest interrupt */
lr r0, [AUX_IRQ_CAUSE]
sr r0, [AUX_IRQ_SELECT]
lr r3, [AUX_IRQ_PRIORITY]
lr r1, [AUX_IRQ_ACT]
bclr r2, r1, r3
brne r2, 0, ret_int_1
ld r0, [rt_thread_switch_interrupt_flag]
brne r0, 0, ret_int_2
ret_int_1: /* return from non-task context */
INTERRUPT_EPILOGUE
rtie
/* there is a dispatch request */
ret_int_2:
/* clear dispatch request */
mov r0, 0
st r0, [rt_thread_switch_interrupt_flag]
/* interrupt return by SW */
lr r10, [AUX_IRQ_ACT]
PUSH r10
bclr r10, r10, r3 /* clear related bits in IRQ_ACT */
sr r10, [AUX_IRQ_ACT]
SAVE_CALLEE_REGS /* save callee save registers */
mov r1, ret_int_r /* save return address */
PUSH r1
ld r0, [rt_interrupt_from_thread]
ld r1, [rt_interrupt_to_thread]
b dispatcher
ret_int_r:
RESTORE_CALLEE_REGS
/* recover AUX_IRQ_ACT to restore the interrupt status */
POPAX AUX_IRQ_ACT
INTERRUPT_EPILOGUE
rtie
/****** entry for fast irq exception handling ******/
/*
 * Fast IRQ entry: minimal save/restore, no scheduler interaction —
 * FIRQ handlers must not request a thread dispatch.
 */
.global exc_entry_firq
.weak exc_entry_firq
.align 4
exc_entry_firq:
SAVE_FIQ_EXC_REGS
lr r0, [AUX_IRQ_CAUSE]
mov r1, exc_int_handler_table
/* r2 = _kernel_exc_tbl + irqno *4 */
ld.as r2, [r1, r0]
/* for the case of software triggered interrupt */
lr r3, [AUX_IRQ_HINT]
cmp r3, r0
bne.d firq_hint_handled
xor r3, r3, r3
sr r3, [AUX_IRQ_HINT]
firq_hint_handled:
/* jump to interrupt handler */
mov r0, sp
jl [r2]
firq_return:
RESTORE_FIQ_EXC_REGS
rtie
|
vandercookking/h7_device_RTT
| 9,422
|
rt-thread/components/lwp/arch/arm/cortex-a/lwp_gcc.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#include "asm-generic.h"
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text
/*
 * void arch_start_umode(args, text, ustack, kstack);
 * Enter user mode for the first time: kernel sp <- r3, SPSR <- USR mode,
 * then exception-return to the user text address in r1. Does not return.
 */
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
mov r3, r2 ;/* user stack top */
/* set data address. */
movs pc, r1 /* eret-style: loads SPSR into CPSR, drops to user mode */
/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 * Like arch_start_umode, but first copies the 3-word lwp_thread_return
 * trampoline onto the top of the user stack (and cleans/invalidates the
 * caches for it) so the C runtime can return into a thread-exit syscall.
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
cps #Mode_SYS
sub sp, r2, #16
ldr r2, =lwp_thread_return
ldr r4, [r2]
str r4, [sp]
ldr r4, [r2, #4]
str r4, [sp, #4]
ldr r4, [r2, #8]
str r4, [sp, #8]
mov r4, sp
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r4, c7, c5, 0 ;//iciallu
dsb
isb
mov lr, sp /* user LR = trampoline address on the user stack */
cps #Mode_SVC
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
/* set data address. */
movs pc, r1
/*
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
Builds an initial kernel-stack frame for a cloned/forked thread so that a
context switch into it resumes at exit_addr, and stores the resulting sp
through thread_sp (r3).
NOTE(review): "stmfd r1!, {r1 - r12}" has the base register r1 in the
register list together with writeback — UNPREDICTABLE per the ARMv7-A
ARM. It apparently works on the targeted cores; confirm intent upstream.
*/
.global arch_set_thread_context
arch_set_thread_context:
sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
stmfd r1!, {r0}
mov r12, #0
stmfd r1!, {r12}
stmfd r1!, {r1 - r12}
stmfd r1!, {r12} /* new thread return value */
mrs r12, cpsr
orr r12, #(1 << 7) /* disable irq */
stmfd r1!, {r12} /* spsr */
mov r12, #0
stmfd r1!, {r12} /* now user lr is 0 */
stmfd r1!, {r2} /* user sp */
#ifdef RT_USING_FPU
stmfd r1!, {r12} /* not use fpu */
#endif
str r1, [r3]
mov pc, lr
/*
 * void *arch_get_user_sp(void): read the banked SYS/USR-mode sp.
 */
.global arch_get_user_sp
arch_get_user_sp:
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
mov pc, lr
/*
 * fork/vfork syscall entries: preserve callee-saved registers around the
 * C implementation, then leave via the common syscall-exit path. The
 * arch_*_exit labels are also the resume points for the child thread.
 */
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
push {r4 - r12, lr}
bl _sys_fork
arch_fork_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
.global sys_clone
.global arch_clone_exit
sys_clone:
push {r4 - r12, lr}
bl _sys_clone
arch_clone_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
Jump to a freshly exec()ed user image: kernel sp <- r1, return target <- r2,
user mode via SPSR, then out through arch_ret_to_user.
*/
.global lwp_exec_user
lwp_exec_user:
cpsid i
mov sp, r1
mov lr, r2
mov r2, #Mode_USR
msr spsr_cxsf, r2
ldr r3, =0x80000000 /* presumably the user-space text base — TODO confirm */
b arch_ret_to_user
/*
 * void SVC_Handler(void);
 * Syscall entry. The syscall number is taken from r7 (Linux-EABI style):
 *   0xe000 -> arch_signal_quit, 0xf000 -> ret_from_user (debug return),
 *   otherwise low byte indexes lwp_get_sys_api().
 */
.global vector_swi
.type vector_swi, % function
START_POINT(vector_swi)
push {lr}
mrs lr, spsr
push {r4, r5, lr} /* frame: {r4, r5, spsr} + saved lr above */
cpsie i /* syscalls run with interrupts enabled */
push {r0 - r3, r12}
bl rt_thread_self
bl lwp_user_setting_save
and r0, r7, #0xf000
cmp r0, #0xe000
beq arch_signal_quit
cmp r0, #0xf000
beq ret_from_user
and r0, r7, #0xff
bl lwp_get_sys_api
cmp r0, #0 /* r0 = api */
mov lr, r0
pop {r0 - r3, r12} /* restore the original syscall arguments */
beq arch_syscall_exit /* unknown syscall number: bail out */
blx lr /* call the API; it falls through to arch_syscall_exit */
START_POINT_END(vector_swi)
/*
 * Common syscall epilogue: unstack spsr/lr and drop into the
 * user-return path below.
 */
.global arch_syscall_exit
arch_syscall_exit:
cpsid i
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
/*
 * Unified kernel->user exit: handle debugger attach, thread exit
 * requests and pending POSIX signals before the exception return.
 */
.global arch_ret_to_user
arch_ret_to_user:
push {r0-r12, lr}
bl lwp_check_debug
bl lwp_check_exit_request
cmp r0, #0
beq 1f
mov r0, #0
b sys_exit /* exit requested: never returns to user */
1:
mov r0, sp
/* r0 -> exp frame */
bl lwp_thread_signal_catch
pop {r0-r12, lr}
push {r0}
ldr r0, =rt_dbg_ops /* debugger hook table; non-NULL => notify attach */
ldr r0, [r0]
cmp r0, #0
pop {r0}
beq 2f
push {r0-r3, r12, lr}
mov r0, lr
bl dbg_attach_req
pop {r0-r3, r12, lr}
2:
movs pc, lr /* exception return to user mode */
#ifdef RT_USING_SMART
/*
 * If a kernel debugger is registered and requests a suspend, inject the
 * two-word lwp_debugreturn trampoline onto the user stack and "return"
 * to it in user mode; the trampoline re-enters the kernel via svc with
 * r7 = 0xf000, landing at ret_from_user below.
 */
.global lwp_check_debug
lwp_check_debug:
ldr r0, =rt_dbg_ops
ldr r0, [r0]
cmp r0, #0
bne 1f
bx lr /* no debugger: fast path out */
1:
push {lr}
bl dbg_check_suspend
cmp r0, #0
beq lwp_check_debug_quit
cps #Mode_SYS
sub sp, #8
ldr r0, =lwp_debugreturn
ldr r1, [r0]
str r1, [sp]
ldr r1, [r0, #4]
str r1, [sp, #4]
mov r1, sp
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
add r1, #4
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
dsb
isb
mov r0, sp /* lwp_debugreturn */
cps #Mode_SVC
mrs r1, spsr
push {r1} /* keep original SPSR; restored in ret_from_user */
mov r1, #Mode_USR
msr spsr_cxsf, r1
movs pc, r0
ret_from_user:
/* undo the trampoline injection and the frame pushed by vector_swi */
cps #Mode_SYS
add sp, #8
cps #Mode_SVC
/*
pop {r0 - r3, r12}
pop {r4 - r6, lr}
*/
add sp, #(4*9)
pop {r4}
msr spsr_cxsf, r4
lwp_check_debug_quit:
pop {pc}
/*
 * Return path of a user signal handler (svc with r7 = 0xe000):
 * discard the handler's kernel frame and restore the interrupted
 * user context saved by arch_signal_ucontext_save.
 */
arch_signal_quit:
cpsid i
/* drop context of signal handler */
pop {r0 - r3, r12}
pop {r4, r5, lr}
pop {lr}
/* restore context */
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
bl arch_signal_ucontext_restore
/* lr <- *(&frame.ip) */
ldr lr, [r0]
cps #Mode_SYS
mov sp, r0
/* drop ip in the frame and restore cpsr */
pop {r0}
pop {r0}
msr spsr_cxsf, r0
pop {r0-r12, lr}
cps #Mode_SVC
b arch_ret_to_user
/**
 * rt_noreturn
 * void arch_thread_signal_enter(
 * int signo, -> r0
 * siginfo_t *psiginfo, -> r1
 * void *exp_frame, -> r2
 * void *entry_uaddr, -> r3
 * lwp_sigset_t *save_sig_mask, -> ??
 * )
 * Saves the interrupted user context plus a sigreturn trampoline on the
 * user stack (arch_signal_ucontext_save), flushes the caches for the
 * copied trampoline, then exception-returns into the user-space signal
 * handler (or straight into sigreturn when no handler is registered).
 */
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov r4, r0
mov r5, r3
cps #Mode_SYS
mov r0, lr
mov r3, sp
cps #Mode_SVC
bl arch_signal_ucontext_save
/* reset user sp */
cps #Mode_SYS
mov sp, r0
mov lr, r0 /* user LR = sigreturn trampoline at new stack top */
cps #Mode_SVC
/* r1,r2 <- new_user_sp */
mov r1, r0
mov r2, r0
/* r0 <- signo */
mov r0, r4
mov r1, r0
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
add r1, #4
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
dsb
isb
/* r4 <- &sigreturn */
mov r4, r2
/* lr <- user_handler() */
mov lr, r5
cmp lr, #0
moveq lr, r4 /* no handler installed: jump straight to sigreturn */
/* r1 <- siginfo */
mov r1, r2
add r1, #8
/* handler(signo, siginfo, ucontext) */
movs pc, lr
/* User-copied trampolines: re-enter the kernel through svc with the
 * special r7 codes decoded by vector_swi. */
lwp_debugreturn:
mov r7, #0xf000
svc #0
.global lwp_sigreturn
lwp_sigreturn:
mov r7, #0xe000
svc #0
lwp_thread_return:
mov r0, #0
mov r7, #0x01
svc #0
#endif
/* int check_vfp(void): returns FPEXC.EN (1 if the VFP unit is enabled),
 * or constant 0 when the kernel is built without FPU support. */
.global check_vfp
check_vfp:
#ifdef RT_USING_FPU
vmrs r0, fpexc
ubfx r0, r0, #30, #1
#else
mov r0, #0
#endif
mov pc, lr
/* void get_vfp(void *buf): dump d0-d31 then FPSCR into buf (no-op sans FPU). */
.global get_vfp
get_vfp:
#ifdef RT_USING_FPU
vstmia r0!, {d0-d15}
vstmia r0!, {d16-d31}
vmrs r1, fpscr
str r1, [r0]
#endif
mov pc, lr
/* Read the user-read-only thread ID register TPIDRURO (TLS pointer). */
.globl arch_get_tidr
arch_get_tidr:
mrc p15, 0, r0, c13, c0, 3
bx lr
/* Write TPIDRURO; arch_set_thread_area is an alias of arch_set_tidr. */
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mcr p15, 0, r0, c13, c0, 3
bx lr
/* kuser support: Linux-compatible helpers mapped at fixed high addresses
 * (0xffff0f60..0xffff0ffc) for pre-ARMv6K user code. Layout and padding
 * must match the Linux kuser ABI exactly. */
.macro kuser_pad, sym, size
.if (. - \sym) & 3
.rept 4 - (. - \sym) & 3
.byte 0
.endr
.endif
.rept (\size - (. - \sym)) / 4
.word 0xe7fddef1 /* fill with an undefined instruction */
.endr
.endm
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
__kuser_cmpxchg64: @ 0xffff0f60
stmfd sp!, {r4, r5, r6, lr}
ldmia r0, {r4, r5} @ load old val
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
eorseq r3, r1, r5 @ compare with oldval (2)
2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
dmb
mov pc, lr
kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
1: ldr r3, [r2] @ load current val
subs r3, r3, r0 @ compare with oldval
2: streq r1, [r2] @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
mov pc, lr
kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
mov pc, lr
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
kuser_pad __kuser_get_tls, 16
.rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
|
vandercookking/h7_device_RTT
| 6,431
|
rt-thread/components/lwp/arch/risc-v/rv64/lwp_gcc.S
|
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-19 lizhirui port to new version of rt-smart
* 2022-11-08 Wangxiaoyao Cleanup codes;
* Support new context switch
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"
.section .text.lwp
/*
 * void arch_start_umode(args, text, ustack, kstack);
 * First entry into user mode: kernel sp parked in sscratch, SPP cleared
 * (sret goes to U-mode), SPIE set (interrupts on after sret), sepc = text.
 * a3 receives the user stack top — presumably consumed by the user entry
 * code; TODO confirm against the crt caller.
 */
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
// load kstack for user process
csrw sscratch, a3
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
csrs sstatus, t0
csrw sepc, a1
mv a3, a2
sret//enter user mode
/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 * Variant used by the C runtime: copies the thread-return trampoline to
 * the user stack (lwp_copy_return_code_to_user_stack) and aligns the
 * user sp (lwp_fix_sp) before dropping to user mode.
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
csrs sstatus, t0
csrw sepc, a1
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
mv a0, s2
call lwp_copy_return_code_to_user_stack
mv a0, s2
call lwp_fix_sp
mv sp, a0//user_sp
mv ra, a0//return address
mv a0, s0//args
csrw sscratch, s3 /* kernel stack for the next trap from user mode */
sret//enter user mode
/**
* Unify exit point from kernel mode to enter user space
* we handle following things here:
* 1. restoring user mode debug state (not support yet)
* 2. handling thread's exit request
* 3. handling POSIX signal
* 4. restoring user context
* 5. jump to user mode
*/
.global arch_ret_to_user
arch_ret_to_user:
// TODO: we don't support kernel gdb server in risc-v yet
// so we don't check debug state here and handle debugging business
call lwp_check_exit_request
beqz a0, 1f
// an exit was requested for this thread: terminate instead of returning to user
mv a0, x0
call sys_exit
1:
// deliver any pending POSIX signal; sp points at the saved exception frame
mv a0, sp
call lwp_thread_signal_catch
ret_to_user_exit:
RESTORE_ALL
// `RESTORE_ALL` also reset sp to user sp, and setup sscratch
sret
/**
 * Restore user context from the exception frame stored on the user stack
 * and handle pending signals; reached via the 0xfe pseudo-syscall issued
 * by the lwp_sigreturn trampoline.
 */
arch_signal_quit:
LOAD a0, FRAME_OFF_SP(sp) // a0 = user sp recorded in the trap frame
call arch_signal_ucontext_restore
/* reset kernel sp to the stack */
STORE sp, FRAME_OFF_SP(a0)
/* return value is user sp */
mv sp, a0
/* restore user sp before enter trap */
addi a0, sp, CTX_REG_NR * REGBYTES
csrw sscratch, a0
RESTORE_ALL
SAVE_ALL // immediately re-save so arch_ret_to_user sees a consistent frame
j arch_ret_to_user
/**
 * rt_noreturn
 * void arch_thread_signal_enter(
 * int signo, -> a0
 * siginfo_t *psiginfo, -> a1
 * void *exp_frame, -> a2
 * void *entry_uaddr, -> a3
 * lwp_sigset_t *save_sig_mask, -> a4
 * )
 */
.global arch_thread_signal_enter
arch_thread_signal_enter:
// keep the arguments needed after the call below in callee-saved regs
mv s3, a2 // s3 = exp_frame
mv s2, a0 // s2 = signo
mv s1, a3 // s1 = user handler entry (may be NULL)
LOAD t0, FRAME_OFF_SP(a2)
mv a3, t0 // a3 = user sp at trap time
call arch_signal_ucontext_save // a0 = new user sp with ucontext pushed
/** restore kernel sp */
addi sp, s3, CTX_REG_NR * REGBYTES
/**
 * set register RA to user signal handler
 * set sp to user sp & save kernel sp in sscratch
 */
mv ra, a0
csrw sscratch, sp
mv sp, a0
/**
 * s1 is signal_handler,
 * s1 = !s1 ? lwp_sigreturn : s1;
 */
bnez s1, 1f
mv s1, ra
1:
/* enter user mode and enable interrupt when return to user mode */
li t0, SSTATUS_SPP
csrc sstatus, t0
li t0, SSTATUS_SPIE
csrs sstatus, t0
/* sepc <- signal_handler */
csrw sepc, s1
/* a0 <- signal id */
mv a0, s2
/* a1 <- siginfo */
add a1, sp, 16
/* dummy a2 */
mv a2, a1
/**
 * handler(signo, psi, ucontext);
 */
sret
.align 3
/* user-mode trampoline: issues the debug-return pseudo-syscall (id 0xff) */
lwp_debugreturn:
li a7, 0xff
ecall
.align 3
.global lwp_sigreturn
/* user-mode trampoline: returns from a signal handler via pseudo-syscall 0xfe */
lwp_sigreturn:
li a7, 0xfe
ecall
.align 3
lwp_sigreturn_end:
.align 3
.global lwp_thread_return
/* user-mode trampoline: thread entry returned, invoke sys_exit(0) */
lwp_thread_return:
li a0, 0
li a7, 1
ecall
.align 3
.global lwp_thread_return_end
lwp_thread_return_end:
/* read the thread-id register (tp) */
.globl arch_get_tidr
arch_get_tidr:
mv a0, tp
ret
/* set the TLS / thread-id register (tp); both entry points share one body */
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mv tp, a0
ret
/* trap entry for syscalls; the register frame is saved by START_POINT */
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
//swap to thread kernel stack
csrr t0, sstatus
andi t0, t0, 0x100 // SPP bit: privilege level we trapped from
beqz t0, __restore_sp_from_tcb
__restore_sp_from_sscratch: // from kernel
csrr t0, sscratch
j __move_stack_context
__restore_sp_from_tcb: // from user
la a0, rt_current_thread
LOAD a0, 0(a0)
jal get_thread_kernel_stack_top
mv t0, a0
__move_stack_context:
mv t1, sp//src
mv sp, t0//switch stack
addi sp, sp, -CTX_REG_NR * REGBYTES
//copy context (CTX_REG_NR doublewords) from the old stack to the new one
li s0, CTX_REG_NR//cnt
mv t2, sp//dst
copy_context_loop:
LOAD t0, 0(t1)
STORE t0, 0(t2)
addi s0, s0, -1
addi t1, t1, 8
addi t2, t2, 8
bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* fetch SYSCALL ID */
LOAD a7, 17 * REGBYTES(sp)
addi a7, a7, -0xfe
beqz a7, arch_signal_quit // id 0xfe: return path of a signal handler
#ifdef ARCH_MM_MMU
/* save setting when syscall enter */
call rt_thread_self
call lwp_user_setting_save
#endif
mv a0, sp // hand the saved frame to the C-level dispatcher
OPEN_INTERRUPT
call syscall_handler
j arch_syscall_exit
START_POINT_END(syscall_entry)
/* common syscall exit: route user-mode callers through arch_ret_to_user */
.global arch_syscall_exit
arch_syscall_exit:
CLOSE_INTERRUPT
#if defined(ARCH_MM_MMU)
LOAD s0, 2 * REGBYTES(sp)
andi s0, s0, 0x100 // SPP bit of the saved sstatus: 0 means we came from user
bnez s0, dont_ret_to_user
j arch_ret_to_user
#endif
dont_ret_to_user:
#ifdef ARCH_MM_MMU
/* restore setting when syscall exit */
call rt_thread_self
call lwp_user_setting_restore
/* after restore the reg `tp`, need modify context */
STORE tp, 4 * REGBYTES(sp)
#endif
//restore context
RESTORE_ALL
csrw sscratch, zero
sret
|
vandercookking/h7_device_RTT
| 11,984
|
rt-thread/components/lwp/arch/aarch64/cortex-a/lwp_gcc.S
|
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
/*********************
* SPSR BIT *
*********************/
#define SPSR_Mode(v) ((v) << 0)
#define SPSR_A64 (0 << 4)
#define SPSR_RESEVRED_5 (0 << 5)
#define SPSR_FIQ_MASKED(v) ((v) << 6)
#define SPSR_IRQ_MASKED(v) ((v) << 7)
#define SPSR_SERROR_MASKED(v) ((v) << 8)
#define SPSR_D_MASKED(v) ((v) << 9)
#define SPSR_RESEVRED_10_19 (0 << 10)
#define SPSR_IL(v) ((v) << 20)
#define SPSR_SS(v) ((v) << 21)
#define SPSR_RESEVRED_22_27 (0 << 22)
#define SPSR_V(v) ((v) << 28)
#define SPSR_C(v) ((v) << 29)
#define SPSR_Z(v) ((v) << 30)
#define SPSR_N(v) ((v) << 31)
/**************************************************/
.text
/*
 * void arch_start_umode(args, text, ustack, kstack);
 * First drop from EL1 to EL0 for a new process.
 *   x0 = args, x1 = user entry, x2 = user stack top, x3 = kernel stack
 */
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64) // target: EL0, AArch64 state
mov x3, x2 ;/* user stack top */
msr daifset, #3 // mask IRQ/FIQ while rewriting exception-return state
dsb sy
// NOTE(review): x30 is loaded from the *current* sp_el0, and sp_el0 is never
// written in this routine -- confirm the caller pre-sets the user stack
mrs x30, sp_el0
msr spsr_el1, x4
msr elr_el1, x1
eret
/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 * Like arch_start_umode, but first copies the lwp_thread_return trampoline
 * just below the user stack top so a returning entry function exits cleanly.
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
sub x4, x2, #0x10 // reserve 16 bytes on the user stack for the trampoline
adr x2, lwp_thread_return
// copy 12 bytes of trampoline code (3 instructions) onto the user stack
ldr x5, [x2]
str x5, [x4]
ldr x5, [x2, #4]
str x5, [x4, #4]
ldr x5, [x2, #8]
str x5, [x4, #8]
// clean dcache and invalidate icache so the copied code is fetchable
mov x5, x4
dc cvau, x5
add x5, x5, #8
dc cvau, x5
dsb sy
ic ialluis
dsb sy
msr sp_el0, x4 // user sp = start of the copied trampoline
mov sp, x3 // kernel stack
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0 // lr = trampoline, reached if the entry function returns
msr spsr_el1, x4
msr elr_el1, x1
eret
/*
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
Prepare a kernel stack so the new thread resumes at exit_addr with x0 == 0.
*/
.global arch_set_thread_context
arch_set_thread_context:
sub x1, x1, #CONTEXT_SIZE // carve one exception frame off the kernel stack
str x2, [x1, #CONTEXT_OFFSET_SP_EL0]
sub x1, x1, #CONTEXT_SIZE // and a second frame consumed by the context switch
str xzr, [x1, #CONTEXT_OFFSET_X0] /* new thread return 0 */
mov x4, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
str x4, [x1, #CONTEXT_OFFSET_SPSR_EL1]
str x0, [x1, #CONTEXT_OFFSET_ELR_EL1] // resume point = exit_addr
str x1, [x3] // *thread_sp = prepared stack pointer
ret
/* return the current user-mode stack pointer (sp_el0) in x0 */
.global arch_get_user_sp
arch_get_user_sp:
mrs x0, sp_el0
ret
/* common return path for fork()/clone() children: reuse the syscall exit */
.global arch_fork_exit
.global arch_clone_exit
arch_fork_exit:
arch_clone_exit:
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
Jump to a fresh user entry point after exec, on the given kernel stack.
*/
.global lwp_exec_user
lwp_exec_user:
mov sp, x1
mov x4, #(SPSR_Mode(0) | SPSR_A64)
ldr x3, =0x0000ffff80000000 // NOTE(review): loaded but never used below -- confirm if dead
msr daifset, #3
msr spsr_el1, x4
msr elr_el1, x2
eret
/*
 * void SVC_Handler(regs);
 * since this routine reset the SP, we take it as a start point
 */
START_POINT(SVC_Handler)
/* x0 is initial sp */
mov sp, x0
msr daifclr, #3 /* enable interrupt */
bl rt_thread_self
bl lwp_user_setting_save
ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)] // x8 = syscall number from the saved frame
and x0, x8, #0xf000
cmp x0, #0xe000 // 0xe000-range id: return from a signal handler
beq arch_signal_quit
cmp x0, #0xf000 // 0xf000-range id: return from the debug trampoline
beq ret_from_user
uxtb x0, w8
bl lwp_get_sys_api // x0 = handler for this syscall id (NULL if unknown)
cmp x0, xzr
mov x30, x0
beq arch_syscall_exit
/* reload the first eight syscall arguments from the saved frame */
ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)]
ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)]
ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)]
ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)]
blr x30
/* jump explicitly, make this code position independent */
b arch_syscall_exit
START_POINT_END(SVC_Handler)
/* pop the full exception frame, then fall through into arch_ret_to_user */
.global arch_syscall_exit
arch_syscall_exit:
msr daifset, #3
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29 // this frame slot carries the saved user sp
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
add sp, sp, #0x40 // skip x0-x7: x0 already holds the syscall return value
RESTORE_FPU sp
/* no eret here: execution deliberately falls through into arch_ret_to_user */
/* the sp is reset to the outer most level, irq and fiq are disabled */
/*
 * Unified exit point from kernel to user space:
 * check exit requests, sync debugger single-step state, deliver signals,
 * then restore the user context and eret.
 */
START_POINT(arch_ret_to_user)
/* save exception frame */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
/* pre-action */
bl lwp_check_debug
bl lwp_check_exit_request
cbz w0, 1f
/* exit on event */
mov x0, xzr
b sys_exit
1:
/* check if dbg ops exist */
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbz x0, 3f
/* debugger attached: mirror its state into the SPSR single-step bit (SS, bit 21) */
bl dbg_thread_in_debug
mov x1, #(1 << 21)
mrs x2, spsr_el1
cbz w0, 2f
orr x2, x2, x1 // in debug: enable single-step on eret
msr spsr_el1, x2
b 3f
2:
bic x2, x2, x1 // not in debug: clear single-step
msr spsr_el1, x2
3:
/**
 * push 2 dummy words to simulate a exception frame of interrupt
 * @note in kernel state, the context switch dont saved the context
 */
mrs x0, spsr_el1
mrs x1, elr_el1
stp x1, x0, [sp, #-0x10]!
mov x0, sp
msr daifclr, #3 // signals are delivered with interrupts enabled
bl lwp_thread_signal_catch
msr daifset, #3
ldp x1, x0, [sp], #0x10
msr spsr_el1, x0
msr elr_el1, x1
/* check debug */
/* restore exception frame */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
/* if a debugger is registered, notify it before the final eret
   (x0/x1 are spilled briefly so the rt_dbg_ops check can use them) */
stp x0, x1, [sp, #-0x10]!
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cmp x0, xzr
ldp x0, x1, [sp], #0x10
beq 1f
/* save */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
mrs x0, elr_el1 // report the user resume address to the debugger
bl dbg_attach_req
/* restore */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
1:
eret
START_POINT_END(arch_ret_to_user)
/* suspend into the debugger if dbg ops are registered and a suspend is pending */
.global lwp_check_debug
lwp_check_debug:
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbnz x0, 1f
ret // no debugger registered: nothing to do
1:
stp x29, x30, [sp, #-0x10]!
bl dbg_check_suspend
cbz w0, lwp_check_debug_quit
/* copy the 8-byte lwp_debugreturn trampoline onto the user stack */
mrs x2, sp_el0
sub x2, x2, #0x10
mov x3, x2
msr sp_el0, x2
ldr x0, =lwp_debugreturn
ldr w1, [x0]
str w1, [x2]
ldr w1, [x0, #4]
str w1, [x2, #4]
/* clean dcache and invalidate icache so the copied code is fetchable */
dc cvau, x2
add x2, x2, #4
dc cvau, x2
dsb sy
isb sy
ic ialluis
isb sy
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #-0x10]! // keep original return state for ret_from_user
msr elr_el1, x3 /* lwp_debugreturn */
mov x1, #(SPSR_Mode(0) | SPSR_A64)
orr x1, x1, #(1 << 21) // single-step the trampoline
msr spsr_el1, x1
eret
ret_from_user:
/* sp_el0 += 16 for drop ins lwp_debugreturn */
mrs x0, sp_el0
add x0, x0, #0x10
msr sp_el0, x0
/* now is el1, sp is pos(empty) - sizeof(context) */
mov x0, sp
add x0, x0, #0x220 // drop the SVC exception frame pushed on entry
mov sp, x0
ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */
msr elr_el1, x0 /* x0 is origin elr_el1 */
msr spsr_el1, x1
lwp_check_debug_quit:
ldp x29, x30, [sp], #0x10
ret
/* return path of the 0xe000 pseudo-syscall: leave a signal handler and
 * restore the pre-signal user context saved on the user stack */
arch_signal_quit:
msr daifset, #3
/* drop current exception frame */
add sp, sp, #CONTEXT_SIZE
mrs x0, sp_el0
bl arch_signal_ucontext_restore
add x0, x0, #-CONTEXT_SIZE
msr sp_el0, x0
/* restore previous exception frame: it lives on the user stack, so
 * switch the active stack pointer to sp_el0 while popping */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1 // back to the kernel (EL1h) stack pointer
b arch_ret_to_user
/**
 * rt_noreturn
 * void arch_thread_signal_enter(
 * int signo, -> x0
 * siginfo_t *psiginfo, -> x1
 * void *exp_frame, -> x2
 * void *entry_uaddr, -> x3
 * lwp_sigset_t *save_sig_mask, -> x4
 * )
 */
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov x19, x0 // x19 = signo
mov x20, x2 /* exp_frame */
mov x21, x3 // x21 = user handler entry
/**
 * move exception frame to user stack
 */
mrs x0, sp_el0
mov x3, x4
/* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
bl arch_signal_ucontext_save
/* flush caches: the sigreturn trampoline now sits at the new user sp */
dc cvau, x0
dsb sy
ic ialluis
dsb sy
/**
 * @brief Prepare the environment for signal handler
 */
/**
 * reset the cpsr
 * and drop exp frame on kernel stack, reset kernel sp
 *
 * @note Since we will reset spsr, but the reschedule will
 * corrupt the spsr, we disable irq for a short period here
 */
msr daifset, #3
ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1]
msr spsr_el1, x1
add sp, x20, #CONTEXT_SIZE
/** reset user sp */
msr sp_el0, x0
/** set the return address to the sigreturn */
mov x30, x0
/** set the entry address of signal handler */
msr elr_el1, x21
/* siginfo is above the return address */
add x2, x30, 16
add x1, x2, #CONTEXT_SIZE
mov x0, x19
/**
 * handler(signo, psi, ucontext);
 */
eret
/* user-mode trampoline: debug-return pseudo-syscall (id 0xf000) */
lwp_debugreturn:
mov x8, 0xf000
svc #0
/* user-mode trampoline: signal-return pseudo-syscall (id 0xe000) */
.global lwp_sigreturn
lwp_sigreturn:
mov x8, #0xe000
svc #0
/* user-mode trampoline: thread entry returned, invoke sys_exit(0) */
lwp_thread_return:
mov x0, xzr
mov x8, #0x01
svc #0
/* read the thread-id register (tpidr_el0) */
.globl arch_get_tidr
arch_get_tidr:
mrs x0, tpidr_el0
ret
/* set the TLS / thread-id register; both entry points share one body */
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
msr tpidr_el0, x0
ret
|
vandercookking/h7_device_RTT
| 1,321
|
rt-thread/components/lwp/arch/x86/i386/lwp_gcc.S
|
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-7-14 JasonHu first version
*/
#include "rtconfig.h"
.section .text.lwp
/*
 * void lwp_switch_to_user(frame);
 * Discard the current kernel stack and iret into user mode using the
 * saved interrupt frame pointed to by `frame`.
 */
.global lwp_switch_to_user
lwp_switch_to_user:
movl 0x4(%esp), %esp // esp = frame argument
addl $4,%esp // skip intr no
popal // restore the general registers from the frame
popl %gs
popl %fs
popl %es
popl %ds
addl $4, %esp // skip error_code
iret // enter to user mode
.extern arch_syscall_exit
/* fork/vfork share the C implementation _sys_fork; children resume
 * through arch_fork_exit which reuses the syscall exit path */
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
jmp _sys_fork
arch_fork_exit:
jmp arch_syscall_exit
/* clone forwards to _sys_clone in the same way */
.global sys_clone
.global arch_clone_exit
sys_clone:
jmp _sys_clone
arch_clone_exit:
jmp arch_syscall_exit
/**
 * rt thread return code
 * user-mode trampoline: invokes sys_exit(0) via int 0x80 when a thread
 * entry function returns
 */
.align 4
.global lwp_thread_return
lwp_thread_return:
movl $1, %eax // eax = 1, sys_exit
movl $0, %ebx // exit code 0
int $0x80
.align 4
.global lwp_thread_return_end
lwp_thread_return_end:
#ifdef RT_USING_SIGNALS
/**
 * signal return code
 * user-mode trampoline executed when a signal handler returns
 */
.align 4
.global lwp_signal_return
lwp_signal_return:
movl $0xe000, %eax // special syscall id for return code
int $0x80
.align 4
.global lwp_signal_return_end
lwp_signal_return_end:
#endif /* RT_USING_SIGNALS */
|
vectorpikachu/compiler
| 6,531
|
hello.S
|
.text
.globl main
main:
addi sp, sp, -352
entry:
li t0, 1
sw t0, 0(sp)
li t0, 0
sw t0, 4(sp)
lw t0, 0(sp)
sw t0, 8(sp)
lw t0, 8(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 12(sp)
lw t0, 12(sp)
bnez t0, then1
j short_circuit8
then1:
li t0, 1
sw t0, 4(sp)
li t0, 0
sw t0, 0(sp)
j end1
short_circuit8:
lw t0, 4(sp)
sw t0, 16(sp)
lw t0, 16(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 20(sp)
lw t0, 20(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 24(sp)
lw t0, 24(sp)
bnez t0, then1
j short_circuit7
end1:
lw t0, 0(sp)
sw t0, 28(sp)
lw t0, 28(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 32(sp)
lw t0, 32(sp)
bnez t0, short_circuit11
j end2
short_circuit7:
li t0, 0
li t1, 1
add t1, t0, t1
sw t1, 36(sp)
lw t0, 36(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 40(sp)
lw t0, 40(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 44(sp)
lw t0, 44(sp)
bnez t0, then1
j short_circuit6
short_circuit11:
lw t0, 4(sp)
sw t0, 48(sp)
lw t0, 48(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 52(sp)
lw t0, 52(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 56(sp)
lw t0, 56(sp)
bnez t0, short_circuit10
j end2
end2:
li t0, 2
sw t0, 60(sp)
li t0, 5
sw t0, 64(sp)
li t0, 7
sw t0, 68(sp)
lw t0, 60(sp)
sw t0, 72(sp)
lw t0, 64(sp)
sw t0, 76(sp)
lw t0, 72(sp)
lw t1, 76(sp)
slt t1, t0, t1
sw t1, 80(sp)
lw t0, 80(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 84(sp)
lw t0, 84(sp)
bnez t0, short_circuit15
j short_circuit14
short_circuit6:
lw t0, 0(sp)
sw t0, 88(sp)
lw t0, 88(sp)
li t1, 0
sub t1, t0, t1
seqz t1, t1
sw t1, 92(sp)
lw t0, 92(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 96(sp)
lw t0, 96(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 100(sp)
lw t0, 100(sp)
bnez t0, then1
j short_circuit5
short_circuit10:
li t0, 1
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 104(sp)
lw t0, 104(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 108(sp)
lw t0, 108(sp)
bnez t0, short_circuit9
j end2
short_circuit15:
lw t0, 64(sp)
sw t0, 112(sp)
lw t0, 68(sp)
sw t0, 116(sp)
lw t0, 112(sp)
lw t1, 116(sp)
slt t1, t0, t1
sw t1, 120(sp)
lw t0, 120(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 124(sp)
lw t0, 124(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 128(sp)
lw t0, 128(sp)
bnez t0, then3
j short_circuit14
short_circuit14:
lw t0, 64(sp)
sw t0, 132(sp)
lw t0, 0(sp)
sw t0, 136(sp)
lw t0, 132(sp)
lw t1, 136(sp)
sgt t1, t0, t1
sw t1, 140(sp)
lw t0, 140(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 144(sp)
lw t0, 144(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 148(sp)
lw t0, 148(sp)
bnez t0, then3
j short_circuit13
short_circuit5:
lw t0, 4(sp)
sw t0, 152(sp)
lw t0, 152(sp)
li t1, 0
sub t1, t0, t1
seqz t1, t1
sw t1, 156(sp)
lw t0, 156(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 160(sp)
lw t0, 160(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 164(sp)
lw t0, 164(sp)
bnez t0, then1
j short_circuit4
short_circuit9:
lw t0, 0(sp)
sw t0, 168(sp)
lw t0, 4(sp)
sw t0, 172(sp)
lw t0, 168(sp)
lw t1, 172(sp)
sub t1, t0, t1
seqz t1, t1
sw t1, 176(sp)
lw t0, 176(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 180(sp)
lw t0, 180(sp)
bnez t0, then2
j end2
then3:
li a0, 5
addi sp, sp, 352
ret
short_circuit13:
lw t0, 4(sp)
sw t0, 184(sp)
lw t0, 60(sp)
sw t0, 188(sp)
lw t0, 184(sp)
lw t1, 188(sp)
slt t1, t0, t1
sw t1, 192(sp)
lw t0, 192(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 196(sp)
lw t0, 196(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 200(sp)
lw t0, 200(sp)
bnez t0, then3
j short_circuit12
short_circuit4:
lw t0, 0(sp)
sw t0, 204(sp)
lw t0, 4(sp)
sw t0, 208(sp)
lw t0, 208(sp)
lw t1, 204(sp)
add t1, t0, t1
sw t1, 212(sp)
lw t0, 212(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 216(sp)
lw t0, 216(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 220(sp)
lw t0, 220(sp)
bnez t0, then1
j short_circuit3
then2:
li a0, 0
addi sp, sp, 352
ret
short_circuit12:
lw t0, 4(sp)
sw t0, 224(sp)
lw t0, 0(sp)
sw t0, 228(sp)
lw t0, 228(sp)
lw t1, 224(sp)
add t1, t0, t1
sw t1, 232(sp)
lw t0, 60(sp)
sw t0, 236(sp)
lw t0, 232(sp)
lw t1, 236(sp)
slt t1, t0, t1
sw t1, 240(sp)
lw t0, 240(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 244(sp)
lw t0, 244(sp)
bnez t0, then3
j end3
short_circuit3:
lw t0, 0(sp)
sw t0, 248(sp)
lw t0, 4(sp)
sw t0, 252(sp)
lw t0, 248(sp)
lw t1, 252(sp)
mul t1, t0, t1
sw t1, 256(sp)
lw t0, 256(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 260(sp)
lw t0, 260(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 264(sp)
lw t0, 264(sp)
bnez t0, then1
j short_circuit2
end3:
li t0, 6
li t1, 6
sub t1, t0, t1
seqz t1, t1
sw t1, 268(sp)
lw t0, 268(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 272(sp)
lw t0, 272(sp)
bnez t0, then4
j short_circuit17
short_circuit2:
lw t0, 4(sp)
sw t0, 276(sp)
lw t0, 0(sp)
sw t0, 280(sp)
lw t0, 276(sp)
lw t1, 280(sp)
div t1, t0, t1
sw t1, 284(sp)
lw t0, 284(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 288(sp)
lw t0, 288(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 292(sp)
lw t0, 292(sp)
bnez t0, then1
j short_circuit1
then4:
lw t0, 60(sp)
sw t0, 296(sp)
lw t0, 296(sp)
li t1, 5
add t1, t0, t1
sw t1, 300(sp)
lw t0, 300(sp)
sw t0, 60(sp)
lw t0, 60(sp)
sw t0, 304(sp)
lw a0, 304(sp)
addi sp, sp, 352
ret
short_circuit17:
lw t0, 64(sp)
sw t0, 308(sp)
lw t0, 308(sp)
li t1, 7
sub t1, t0, t1
seqz t1, t1
sw t1, 312(sp)
lw t0, 312(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 316(sp)
lw t0, 316(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 320(sp)
lw t0, 320(sp)
bnez t0, then4
j short_circuit16
short_circuit1:
lw t0, 0(sp)
sw t0, 324(sp)
lw t0, 4(sp)
sw t0, 328(sp)
lw t0, 328(sp)
lw t1, 324(sp)
sub t1, t0, t1
sw t1, 332(sp)
lw t0, 332(sp)
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 336(sp)
lw t0, 336(sp)
bnez t0, then1
j end1
short_circuit16:
li t0, 6
li t1, 0
sub t1, t0, t1
snez t1, t1
sw t1, 340(sp)
lw t0, 340(sp)
bnez t0, then4
j end4
end4:
li a0, 77
addi sp, sp, 352
ret
|
vectorpikachu/second-compiler
| 18,174
|
hello.S
|
.text
.globl init
init:
addi sp, sp, -144
init_0_entry_11:
j init_0_bb_2
init_0_bb_2:
mv t0, a0
sw t0, 16(sp)
li t0, 0
sw t0, 20(sp)
j init_0_while_entry_12
init_0_while_entry_12:
lw t0, 20(sp)
sw t0, 24(sp)
li t1, 10
slt t0, t0, t1
sw t0, 28(sp)
beqz t0, init_0_while_exit_9
j init_0_while_body_3
init_0_while_body_3:
li t0, 0
sw t0, 32(sp)
j init_0_while_entry_10
init_0_while_entry_10:
lw t0, 32(sp)
sw t0, 36(sp)
li t1, 10
slt t0, t0, t1
sw t0, 40(sp)
beqz t0, init_0_while_exit_5
j init_0_while_body_7
init_0_while_body_7:
li t0, 0
sw t0, 44(sp)
j init_0_while_entry_6
init_0_while_entry_6:
lw t0, 44(sp)
sw t0, 48(sp)
li t1, 10
slt t0, t0, t1
sw t0, 52(sp)
beqz t0, init_0_while_exit_8
j init_0_while_body_1
init_0_while_body_1:
lw t0, 16(sp)
sw t0, 56(sp)
lw t0, 20(sp)
sw t0, 60(sp)
lw t0, 56(sp)
lw t1, 60(sp)
li t2, 400
mul t1, t1, t2
add t0, t0, t1
sw t0, 64(sp)
lw t0, 32(sp)
sw t0, 68(sp)
lw t0, 64(sp)
lw t1, 68(sp)
li t2, 400
mul t1, t1, t2
add t0, t0, t1
sw t0, 72(sp)
lw t0, 44(sp)
sw t0, 76(sp)
lw t0, 72(sp)
lw t1, 76(sp)
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 80(sp)
lw t0, 20(sp)
sw t0, 84(sp)
li t1, 100
mul t0, t0, t1
sw t0, 88(sp)
lw t0, 32(sp)
sw t0, 92(sp)
li t1, 10
mul t0, t0, t1
sw t0, 96(sp)
lw t0, 88(sp)
lw t1, 96(sp)
add t0, t0, t1
sw t0, 100(sp)
lw t0, 44(sp)
sw t0, 104(sp)
lw t0, 100(sp)
lw t1, 104(sp)
add t0, t0, t1
sw t0, 108(sp)
lw t1, 80(sp)
sw t0, 0(t1)
lw t0, 44(sp)
sw t0, 112(sp)
li t1, 1
add t0, t0, t1
sw t0, 116(sp)
sw t0, 44(sp)
j init_0_while_entry_6
init_0_while_exit_8:
lw t0, 32(sp)
sw t0, 120(sp)
li t1, 1
add t0, t0, t1
sw t0, 124(sp)
sw t0, 32(sp)
j init_0_while_entry_10
init_0_while_exit_5:
lw t0, 20(sp)
sw t0, 128(sp)
li t1, 1
add t0, t0, t1
sw t0, 132(sp)
sw t0, 20(sp)
j init_0_while_entry_12
init_0_while_exit_9:
j init_0_exit_4
init_0_exit_4:
addi sp, sp, 144
ret
.text
.globl f1
f1:
addi sp, sp, -256
f1_1_entry_2:
li t0, 0
sw t0, 44(sp)
j f1_1_bb_4
f1_1_bb_4:
mv t0, a0
sw t0, 48(sp)
mv t0, a1
sw t0, 52(sp)
mv t0, a2
sw t0, 56(sp)
mv t0, a3
sw t0, 60(sp)
mv t0, a4
sw t0, 64(sp)
mv t0, a5
sw t0, 68(sp)
mv t0, a6
sw t0, 72(sp)
mv t0, a7
sw t0, 76(sp)
lw t0, 256(sp)
sw t0, 80(sp)
lw t0, 260(sp)
sw t0, 84(sp)
lw t0, 48(sp)
sw t0, 88(sp)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 92(sp)
lw t0, 52(sp)
sw t0, 96(sp)
li t1, 1
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 100(sp)
lw t1, 92(sp)
lw t0, 0(t1)
sw t0, 104(sp)
lw t1, 100(sp)
lw t0, 0(t1)
sw t0, 108(sp)
lw t0, 104(sp)
lw t1, 108(sp)
add t0, t0, t1
sw t0, 112(sp)
lw t0, 56(sp)
sw t0, 116(sp)
li t1, 2
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 120(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 124(sp)
lw t0, 112(sp)
lw t1, 124(sp)
add t0, t0, t1
sw t0, 128(sp)
lw t0, 60(sp)
sw t0, 132(sp)
li t1, 3
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 136(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 140(sp)
lw t0, 128(sp)
lw t1, 140(sp)
add t0, t0, t1
sw t0, 144(sp)
lw t0, 64(sp)
sw t0, 148(sp)
li t1, 4
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 152(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 156(sp)
lw t0, 144(sp)
lw t1, 156(sp)
add t0, t0, t1
sw t0, 160(sp)
lw t0, 68(sp)
sw t0, 164(sp)
li t1, 5
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 168(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 172(sp)
lw t0, 160(sp)
lw t1, 172(sp)
add t0, t0, t1
sw t0, 176(sp)
lw t0, 72(sp)
sw t0, 180(sp)
li t1, 6
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 184(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 188(sp)
lw t0, 176(sp)
lw t1, 188(sp)
add t0, t0, t1
sw t0, 192(sp)
lw t0, 76(sp)
sw t0, 196(sp)
li t1, 7
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 200(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 204(sp)
lw t0, 192(sp)
lw t1, 204(sp)
add t0, t0, t1
sw t0, 208(sp)
lw t0, 80(sp)
sw t0, 212(sp)
li t1, 8
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 216(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 220(sp)
lw t0, 208(sp)
lw t1, 220(sp)
add t0, t0, t1
sw t0, 224(sp)
lw t0, 84(sp)
sw t0, 228(sp)
li t1, 9
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 232(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 236(sp)
lw t0, 224(sp)
lw t1, 236(sp)
add t0, t0, t1
sw t0, 240(sp)
sw t0, 44(sp)
j f1_1_exit_3
f1_1_bb_1:
j f1_1_exit_3
f1_1_exit_3:
lw t0, 44(sp)
sw t0, 244(sp)
mv a0, t0
addi sp, sp, 256
ret
.text
.globl f2
f2:
addi sp, sp, -256
f2_2_entry_1:
li t0, 0
sw t0, 44(sp)
j f2_2_bb_4
f2_2_bb_4:
mv t0, a0
sw t0, 48(sp)
mv t0, a1
sw t0, 52(sp)
mv t0, a2
sw t0, 56(sp)
mv t0, a3
sw t0, 60(sp)
mv t0, a4
sw t0, 64(sp)
mv t0, a5
sw t0, 68(sp)
mv t0, a6
sw t0, 72(sp)
mv t0, a7
sw t0, 76(sp)
lw t0, 256(sp)
sw t0, 80(sp)
lw t0, 260(sp)
sw t0, 84(sp)
lw t0, 48(sp)
sw t0, 88(sp)
li t1, 0
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 92(sp)
li t1, 9
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 96(sp)
lw t0, 52(sp)
sw t0, 100(sp)
li t1, 1
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 104(sp)
lw t1, 96(sp)
lw t0, 0(t1)
sw t0, 108(sp)
lw t1, 104(sp)
lw t0, 0(t1)
sw t0, 112(sp)
lw t0, 108(sp)
lw t1, 112(sp)
add t0, t0, t1
sw t0, 116(sp)
lw t0, 56(sp)
sw t0, 120(sp)
lw t0, 116(sp)
lw t1, 120(sp)
add t0, t0, t1
sw t0, 124(sp)
lw t0, 60(sp)
sw t0, 128(sp)
li t1, 3
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 132(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 136(sp)
lw t0, 124(sp)
lw t1, 136(sp)
add t0, t0, t1
sw t0, 140(sp)
lw t0, 64(sp)
sw t0, 144(sp)
li t1, 4
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 148(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 152(sp)
lw t0, 140(sp)
lw t1, 152(sp)
add t0, t0, t1
sw t0, 156(sp)
lw t0, 68(sp)
sw t0, 160(sp)
li t1, 5
li t2, 400
mul t1, t1, t2
add t0, t0, t1
sw t0, 164(sp)
li t1, 5
li t2, 400
mul t1, t1, t2
add t0, t0, t1
sw t0, 168(sp)
li t1, 5
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 172(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 176(sp)
lw t0, 156(sp)
lw t1, 176(sp)
add t0, t0, t1
sw t0, 180(sp)
lw t0, 72(sp)
sw t0, 184(sp)
li t1, 6
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 188(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 192(sp)
lw t0, 180(sp)
lw t1, 192(sp)
add t0, t0, t1
sw t0, 196(sp)
lw t0, 76(sp)
sw t0, 200(sp)
li t1, 7
li t2, 4
mul t1, t1, t2
add t0, t0, t1
sw t0, 204(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 208(sp)
lw t0, 196(sp)
lw t1, 208(sp)
add t0, t0, t1
sw t0, 212(sp)
lw t0, 80(sp)
sw t0, 216(sp)
lw t0, 212(sp)
lw t1, 216(sp)
add t0, t0, t1
sw t0, 220(sp)
lw t0, 84(sp)
sw t0, 224(sp)
li t1, 9
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 228(sp)
li t1, 8
li t2, 40
mul t1, t1, t2
add t0, t0, t1
sw t0, 232(sp)
mv t1, t0
lw t0, 0(t1)
sw t0, 236(sp)
lw t0, 220(sp)
lw t1, 236(sp)
add t0, t0, t1
sw t0, 240(sp)
sw t0, 44(sp)
j f2_2_exit_2
f2_2_bb_3:
j f2_2_exit_2
f2_2_exit_2:
lw t0, 44(sp)
sw t0, 244(sp)
mv a0, t0
addi sp, sp, 256
ret
.text
.globl main
main:
li t0, -4304
add sp, sp, t0
li t1, 4300
add t1, sp, t1
sw ra, 0(t1)
main_3_entry_1:
li t0, 0
li t1, 4016
add t1, sp, t1
sw t0, 0(t1)
j main_3_bb_2
main_3_bb_2:
li t0, 0
li t1, 4020
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 0
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4024
add t1, sp, t1
sw t0, 0(t1)
li t1, 4024
add t1, sp, t1
lw a0, 0(t1)
call init
addi t0, sp, 12
li t1, 0
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4028
add t1, sp, t1
sw t0, 0(t1)
li t1, 4028
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4032
add t1, sp, t1
sw t0, 0(t1)
li t1, 4032
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4036
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 1
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4040
add t1, sp, t1
sw t0, 0(t1)
li t1, 4040
add t1, sp, t1
lw t0, 0(t1)
li t1, 1
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4044
add t1, sp, t1
sw t0, 0(t1)
li t1, 4044
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4048
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 2
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4052
add t1, sp, t1
sw t0, 0(t1)
li t1, 4052
add t1, sp, t1
lw t0, 0(t1)
li t1, 2
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4056
add t1, sp, t1
sw t0, 0(t1)
li t1, 4056
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4060
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 3
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4064
add t1, sp, t1
sw t0, 0(t1)
li t1, 4064
add t1, sp, t1
lw t0, 0(t1)
li t1, 3
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4068
add t1, sp, t1
sw t0, 0(t1)
li t1, 4068
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4072
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 4
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4076
add t1, sp, t1
sw t0, 0(t1)
li t1, 4076
add t1, sp, t1
lw t0, 0(t1)
li t1, 4
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4080
add t1, sp, t1
sw t0, 0(t1)
li t1, 4080
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4084
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 5
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4088
add t1, sp, t1
sw t0, 0(t1)
li t1, 4088
add t1, sp, t1
lw t0, 0(t1)
li t1, 5
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4092
add t1, sp, t1
sw t0, 0(t1)
li t1, 4092
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4096
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 6
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4100
add t1, sp, t1
sw t0, 0(t1)
li t1, 4100
add t1, sp, t1
lw t0, 0(t1)
li t1, 6
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4104
add t1, sp, t1
sw t0, 0(t1)
li t1, 4104
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4108
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 7
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4112
add t1, sp, t1
sw t0, 0(t1)
li t1, 4112
add t1, sp, t1
lw t0, 0(t1)
li t1, 7
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4116
add t1, sp, t1
sw t0, 0(t1)
li t1, 4116
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4120
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 8
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4124
add t1, sp, t1
sw t0, 0(t1)
li t1, 4124
add t1, sp, t1
lw t0, 0(t1)
li t1, 8
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4128
add t1, sp, t1
sw t0, 0(t1)
li t1, 4128
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4132
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 9
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4136
add t1, sp, t1
sw t0, 0(t1)
li t1, 4136
add t1, sp, t1
lw t0, 0(t1)
li t1, 9
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4140
add t1, sp, t1
sw t0, 0(t1)
li t1, 4140
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4144
add t1, sp, t1
sw t0, 0(t1)
li t1, 4036
add t1, sp, t1
lw a0, 0(t1)
li t1, 4048
add t1, sp, t1
lw a1, 0(t1)
li t1, 4060
add t1, sp, t1
lw a2, 0(t1)
li t1, 4072
add t1, sp, t1
lw a3, 0(t1)
li t1, 4084
add t1, sp, t1
lw a4, 0(t1)
li t1, 4096
add t1, sp, t1
lw a5, 0(t1)
li t1, 4108
add t1, sp, t1
lw a6, 0(t1)
li t1, 4120
add t1, sp, t1
lw a7, 0(t1)
li t1, 4132
add t1, sp, t1
lw t0, 0(t1)
sw t0, 0(sp)
li t1, 4144
add t1, sp, t1
lw t0, 0(t1)
sw t0, 4(sp)
call f1
li t1, 4148
add t1, sp, t1
sw a0, 0(t1)
li t1, 4020
add t1, sp, t1
lw t0, 0(t1)
li t1, 4152
add t1, sp, t1
sw t0, 0(t1)
li t1, 4152
add t1, sp, t1
lw t0, 0(t1)
li t1, 4148
add t1, sp, t1
lw t1, 0(t1)
add t0, t0, t1
li t1, 4156
add t1, sp, t1
sw t0, 0(t1)
li t1, 4156
add t1, sp, t1
lw t0, 0(t1)
li t1, 4020
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 0
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4160
add t1, sp, t1
sw t0, 0(t1)
li t1, 4160
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4164
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 1
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4168
add t1, sp, t1
sw t0, 0(t1)
li t1, 4168
add t1, sp, t1
lw t0, 0(t1)
li t1, 1
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4172
add t1, sp, t1
sw t0, 0(t1)
li t1, 4172
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4176
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 2
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4180
add t1, sp, t1
sw t0, 0(t1)
li t1, 4180
add t1, sp, t1
lw t0, 0(t1)
li t1, 2
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4184
add t1, sp, t1
sw t0, 0(t1)
li t1, 4184
add t1, sp, t1
lw t0, 0(t1)
li t1, 2
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4188
add t1, sp, t1
sw t0, 0(t1)
li t1, 4188
add t1, sp, t1
lw t1, 0(t1)
lw t0, 0(t1)
li t1, 4192
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 3
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4196
add t1, sp, t1
sw t0, 0(t1)
li t1, 4196
add t1, sp, t1
lw t0, 0(t1)
li t1, 3
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4200
add t1, sp, t1
sw t0, 0(t1)
li t1, 4200
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4204
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 4
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4208
add t1, sp, t1
sw t0, 0(t1)
li t1, 4208
add t1, sp, t1
lw t0, 0(t1)
li t1, 4
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4212
add t1, sp, t1
sw t0, 0(t1)
li t1, 4212
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4216
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 0
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4220
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 6
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4224
add t1, sp, t1
sw t0, 0(t1)
li t1, 4224
add t1, sp, t1
lw t0, 0(t1)
li t1, 6
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4228
add t1, sp, t1
sw t0, 0(t1)
li t1, 4228
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4232
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 7
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4236
add t1, sp, t1
sw t0, 0(t1)
li t1, 4236
add t1, sp, t1
lw t0, 0(t1)
li t1, 7
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4240
add t1, sp, t1
sw t0, 0(t1)
li t1, 4240
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4244
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 8
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4248
add t1, sp, t1
sw t0, 0(t1)
li t1, 4248
add t1, sp, t1
lw t0, 0(t1)
li t1, 8
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4252
add t1, sp, t1
sw t0, 0(t1)
li t1, 4252
add t1, sp, t1
lw t0, 0(t1)
li t1, 8
li t2, 4
mul t1, t1, t2
add t0, t0, t1
li t1, 4256
add t1, sp, t1
sw t0, 0(t1)
li t1, 4256
add t1, sp, t1
lw t1, 0(t1)
lw t0, 0(t1)
li t1, 4260
add t1, sp, t1
sw t0, 0(t1)
addi t0, sp, 12
li t1, 9
li t2, 400
mul t1, t1, t2
add t0, t0, t1
li t1, 4264
add t1, sp, t1
sw t0, 0(t1)
li t1, 4264
add t1, sp, t1
lw t0, 0(t1)
li t1, 0
li t2, 40
mul t1, t1, t2
add t0, t0, t1
li t1, 4268
add t1, sp, t1
sw t0, 0(t1)
li t1, 4164
add t1, sp, t1
lw a0, 0(t1)
li t1, 4176
add t1, sp, t1
lw a1, 0(t1)
li t1, 4192
add t1, sp, t1
lw a2, 0(t1)
li t1, 4204
add t1, sp, t1
lw a3, 0(t1)
li t1, 4216
add t1, sp, t1
lw a4, 0(t1)
li t1, 4220
add t1, sp, t1
lw a5, 0(t1)
li t1, 4232
add t1, sp, t1
lw a6, 0(t1)
li t1, 4244
add t1, sp, t1
lw a7, 0(t1)
li t1, 4260
add t1, sp, t1
lw t0, 0(t1)
sw t0, 0(sp)
li t1, 4268
add t1, sp, t1
lw t0, 0(t1)
sw t0, 4(sp)
call f2
li t1, 4272
add t1, sp, t1
sw a0, 0(t1)
li t1, 4020
add t1, sp, t1
lw t0, 0(t1)
li t1, 4276
add t1, sp, t1
sw t0, 0(t1)
li t1, 4276
add t1, sp, t1
lw t0, 0(t1)
li t1, 4272
add t1, sp, t1
lw t1, 0(t1)
add t0, t0, t1
li t1, 4280
add t1, sp, t1
sw t0, 0(t1)
li t1, 4280
add t1, sp, t1
lw t0, 0(t1)
li t1, 4020
add t1, sp, t1
sw t0, 0(t1)
li t1, 4020
add t1, sp, t1
lw t0, 0(t1)
li t1, 4284
add t1, sp, t1
sw t0, 0(t1)
li t1, 4284
add t1, sp, t1
lw a0, 0(t1)
call putint
li a0, 10
call putch
li t0, 0
li t1, 4016
add t1, sp, t1
sw t0, 0(t1)
j main_3_exit_4
main_3_bb_3:
j main_3_exit_4
main_3_exit_4:
li t1, 4016
add t1, sp, t1
lw t0, 0(t1)
li t1, 4288
add t1, sp, t1
sw t0, 0(t1)
li t1, 4288
add t1, sp, t1
lw a0, 0(t1)
li t1, 4300
add t1, sp, t1
lw ra, 0(t1)
li t0, 4304
add sp, sp, t0
ret
|
VidyaBipin/rust_os
| 2,086
|
Usermode/rustrt0/armv8-helpers.S
|
// EXIDX method, handle
// Would emit an ARM exception-index (unwind) table entry for \method.
// The body is commented out on AArch64 (.ARM.exidx is the 32-bit ARM EH
// format), so this macro expands to nothing; the invocations below are
// kept only to mirror the armv7 helper file.
.macro EXIDX method handle
/*
.long EXIDX_\method
.section .ARM.exidx.\method, #exidx
.globl EXIDX_\method
EXIDX_\method: .long \method - . - 0x80000000, \handle
.section .text
*/
.endm
.section .text
// void __aeabi_memcpy4/8(void *dst /*x0*/, const void *src /*x1*/, size_t n /*x2*/)
// Word-wise copy; AEABI guarantees n is a multiple of 4 (8 for memcpy8).
// If that guarantee is violated the SUBS/BGT loop over-copies up to 3 bytes.
// NOTE: ENTRY() is provided by the file that #includes these helpers.
ENTRY(__aeabi_memcpy4)
ENTRY(__aeabi_memcpy8)
tst x2,x2
beq 2f
1:
LDR w3, [x1], #4
STR w3, [x0], #4
SUBS x2, x2, #4
BGT 1b
2:
ret
EXIDX __aeabi_memcpy4, 0x80B0B0B0
// Byte-wise copy for arbitrary alignment and length.
ENTRY(__aeabi_memcpy)
tst x2,x2
beq 2f
1:
LDRB w3, [x1], #1
STRB w3, [x0], #1
SUBS x2, x2, #1
BGT 1b
2:
ret
// C memcpy alias. NOTE(review): x0 is advanced by the loop, so the C
// "returns dst" contract is not honoured -- verify no caller uses the
// return value (Rust-generated code normally does not).
ENTRY(memcpy)
b __aeabi_memcpy
EXIDX __aeabi_memcpy, 0x80B0B0B0
// void *memmove(void *dst /*x0*/, const void *src /*x1*/, size_t n /*x2*/)
// Overlap-safe copy: takes the forward __aeabi_memcpy fast path when the
// regions cannot clobber each other, otherwise copies backwards bytewise.
ENTRY(memmove)
tst x2,x2
beq 2f
cmp x0,x1
// FIX: pointers must be compared unsigned. The old blt/bgt were signed
// and picked the wrong copy direction for addresses with bit 63 set
// (e.g. kernel-half pointers), corrupting overlapping copies.
blo __aeabi_memcpy // DST < SRC (unsigned), forward copy is safe
add x3,x1,x2
cmp x0,x3
bhs __aeabi_memcpy // DST >= SRC+NUM: no overlap, forward copy
// Overlapping with SRC < DST: copy from the end, backwards.
add x0,x0,x2
add x1,x1,x2
1: // Reverse copy
LDRB w3, [x1,#-1]!
STRB w3, [x0,#-1]!
SUBS x2, x2, #1
BGT 1b
2:
ret
EXIDX memmove, 0x80B0B0B0
// void __aeabi_memset8(void *dst /*x0*/, size_t n /*x1*/, int c /*x2*/)
// 8-byte-aligned fill; n is assumed to be a multiple of 8.
// The byte in x2 is replicated across all 8 lanes before the store loop.
ENTRY(__aeabi_memset8)
//.fnstart
tst x1, x1
beq 2f
LSL x3, x2, #8
ORR x2, x2, x3 // x2 now holds 2 copies of the byte
LSL x3, x2, #16
ORR x2, x2, x3 // 4 copies
LSL x3, x2, #32
ORR x2, x2, x3 // 8 copies
1:
STR x2, [x0], #8
SUBS x1, x1, #8
BGT 1b
2:
ret
//.fnend
// void __aeabi_memset4(void *dst /*x0*/, size_t n /*x1*/, int c /*x2*/)
// 4-byte-aligned fill; n is assumed to be a multiple of 4.
ENTRY(__aeabi_memset4)
//.fnstart
tst x1, x1
beq 2f
LSL w3, w2, #8
ORR w2, w2, w3 // 2 copies of the byte
LSL w3, w2, #16
ORR w2, w2, w3 // 4 copies
1:
STR w2, [x0], #4
SUBS x1, x1, #4
BGT 1b
2:
ret
//.fnend
// void __aeabi_memclr4/8(void *dst /*x0*/, size_t n /*x1*/)
// Zero-fill: delegates to __aeabi_memset4 with the fill value forced to 0.
ENTRY(__aeabi_memclr4)
ENTRY(__aeabi_memclr8)
MOV x2, #0
// FIX: branch to the function entry, not into its store loop. The old
// "b 1b" skipped __aeabi_memset4's n==0 check, so a zero-length clear
// still wrote one word past the buffer.
b __aeabi_memset4
// void __aeabi_memset(void *dst /*x0*/, size_t n /*x1*/, int c /*x2*/)
// Byte-wise fill for arbitrary alignment/length. Note the AEABI argument
// order (dst, n, c) differs from C memset (dst, c, n).
ENTRY(__aeabi_memset)
tst x1, x1
beq 2f
1:
strb w2, [x0], #1
subs x1,x1, #1
bne 1b
2:
ret
// void *memset(void *dst /*x0*/, int c /*x1*/, size_t n /*x2*/) -- C order.
// NOTE(review): x0 is advanced, so the C "returns dst" contract is not
// honoured -- verify no caller uses the return value.
ENTRY(memset)
//.fnstart
tst x2, x2
beq 2f
1:
strb w1, [x0], #1
subs x2,x2, #1
bne 1b
2:
ret
//.fnend
// void __aeabi_memclr(void *dst /*x0*/, size_t n /*x1*/): zero-fill.
ENTRY(__aeabi_memclr)
MOV x2, #0
b __aeabi_memset
// int memcmp(const void *a /*x0*/, const void *b /*x1*/, size_t n /*x2*/)
// Standard C sign convention: <0 if a<b, 0 if equal, >0 if a>b
// (now consistent with the amd64 implementation in this crate).
ENTRY(memcmp)
//.fnstart
tst x2,x2
beq 2f
1:
ldrb w3, [x0], #1
ldrb w4, [x1], #1
cmp w4, w3 // flags = b_byte - a_byte
bne 2f
subs x2,x2, #1
bne 1b
2:
mov x0, #0 // MOVZ leaves flags intact; Z set => equal (or n==0)
beq 3f
// FIX: the conditions were swapped (lo/hs), returning +1 when a<b and
// -1 when a>b -- the inverse of C memcmp.
cinv x0,x0, hi // b > a  => a < b => x0 = ~0 = -1
cinc x0,x0, lo // b < a  => a > b => x0 = +1
3:
ret
// AEABI soft-float double/single comparison helpers: not implemented.
// Any call hangs in an infinite loop ("b ." spins forever); userland code
// must not use floating-point comparisons yet.
ENTRY(__aeabi_dcmplt)
ENTRY(__aeabi_dcmple)
ENTRY(__aeabi_dcmpeq)
ENTRY(__aeabi_dcmpge)
ENTRY(__aeabi_dcmpgt)
b .
ENTRY(__aeabi_fcmplt)
ENTRY(__aeabi_fcmple)
ENTRY(__aeabi_fcmpeq)
ENTRY(__aeabi_fcmpge)
ENTRY(__aeabi_fcmpgt)
b .
EXIDX __aeabi_unwind_cpp_pr0, 0x1
// Unwinder resume hook: unwinding is unsupported, hang if ever reached.
ENTRY(_Unwind_Resume)
b .
|
VidyaBipin/rust_os
| 5,462
|
Usermode/rustrt0/armv7-helpers.S
|
@
@ MAGIC MACRO
@
@ EXIDX method, handle: emits an ARM exception-index table entry for
@ \method with the given unwind opcode word. NOTE(review): the leading
@ ".long EXIDX_\method" is emitted into the *current* section (text),
@ presumably to keep the exidx entry referenced -- confirm intent.
.macro EXIDX method handle
.long EXIDX_\method
.pushsection .ARM.exidx.\method, #exidx
.globl EXIDX_\method
EXIDX_\method: .long \method - . - 0x80000000, \handle
.popsection
.endm
@ ENTRY_: like ENTRY() (supplied by the including file) but without the
@ .type directive -- used for secondary aliases of a function.
#define ENTRY_(v) .globl v ; v:
@ USER_LOG message: issues the kernel "log message" syscall (r12 = call 0)
@ with a pointer/length pair for an inline .rodata string. Clobbers r0,r1,r12.
.macro USER_LOG message
ldr r0, =9f
mov r1, #(10f - 9f)
mov r12, #0
svc #0
.pushsection .rodata
9: .ascii "\message"
10:
.popsection
.endm
@ TODO_STUB v: defines symbol v as a stub that logs "TODO: v" then hangs.
.macro TODO_STUB v
ENTRY(\v)
USER_LOG "TODO: \v"
b .
.endm
.section .text
@ Aligned memmove variant is not implemented yet: log-and-hang stub.
TODO_STUB __aeabi_memmove4
EXIDX __aeabi_memmove4, 0x80B0B0B0
@ R0 = dst
@ R1 = src
@ R2 = cnt
@ void *memmove: overlap-safe copy; forward __aeabi_memcpy when safe,
@ otherwise a backwards byte copy.
ENTRY(memmove)
movs r2, r2
bxeq lr
@ TODO: Have memmove4 for 4 byte aligned versions
@ If DST < SRC: Check for overlap
cmp r0, r1
bxeq lr @ if DST==SRC, return early
@ FIX: address comparisons must be unsigned. The old blt/ble were signed
@ and chose the wrong direction for pointers with bit 31 set (this file
@ is also #included by the kernel, whose addresses live above 0x80000000).
blo __aeabi_memcpy
@ SRC < DST: Possible slow path
add r3, r1, r2
cmp r3, r0
@ If SRC + CNT <= DST (unsigned), go the fast path
bls __aeabi_memcpy
@ Reverse slow path
add r1, r2
add r0, r2
1:
LDRB r3, [r1, #-1]!
STRB r3, [r0, #-1]!
SUBS r2, r2, #1
BNE 1b
BX lr
@ void *memset(void *dst /*r0*/, int c /*r1*/, size_t n /*r2*/) -- C order.
@ NOTE(review): r0 is advanced by the loop, so the C "returns dst"
@ contract is not honoured -- verify no caller uses the return value.
ENTRY(memset)
.fnstart
movs r2, r2
beq 2f
1:
strb r1, [r0], #1
subs r2, #1
bne 1b
2:
bx lr
.fnend
@ The byte-wise memcpy below is the live implementation; the optimised
@ AEABI variants in the #else branch are currently disabled.
#if 1
ENTRY(memcpy)
movs r2, r2
bxeq lr
1:
LDRB r3, [r1], #1
STRB r3, [r0], #1
SUBS r2, r2, #1
BGT 1b
BX lr
#else
@ === DEAD CODE: everything to the matching #endif is compiled out ===
@ void __aeabi_memcpy4/8(dst /*r0*/, src /*r1*/, n /*r2*/): word copy,
@ n assumed to be a multiple of 4 (8 for memcpy8).
ENTRY(__aeabi_memcpy4)
ENTRY_(__aeabi_memcpy8)
movs r2, r2
bxeq lr
1:
LDR r3, [r1], #4
STR r3, [r0], #4
SUBS r2, r2, #4
BGT 1b
BX lr
EXIDX __aeabi_memcpy4, 0x80B0B0B0
@ Byte-wise copy for arbitrary alignment/length.
ENTRY(__aeabi_memcpy)
movs r2, r2
bxeq lr
1:
LDRB r3, [r1], #1
STRB r3, [r0], #1
SUBS r2, r2, #1
BGT 1b
BX lr
ENTRY(memcpy)
b __aeabi_memcpy
EXIDX __aeabi_memcpy, 0x80B0B0B0
@ void __aeabi_memset4/8(dst /*r0*/, n /*r1*/, c /*r2*/): replicate the
@ byte across a word, then store word-wise; n assumed multiple of 4/8.
ENTRY(__aeabi_memset4)
ENTRY_(__aeabi_memset8)
movs r1, r1
bxeq lr
LSL r3, r2, #8
ORR r2, r2, r3 @ 2 copies of the byte
LSL r3, r2, #16
ORR r2, r2, r3 @ 4 copies
1:
STR r2, [r0], #4
SUBS r1, r1, #4
BGT 1b
BX lr
EXIDX __aeabi_memset4, 0x80B0B0B0
@ void __aeabi_memclr4/8(void *dst /*r0*/, size_t n /*r1*/): zero-fill.
@ (Dead code: this is inside the "#if 1"'s #else branch.)
ENTRY(__aeabi_memclr4)
ENTRY_(__aeabi_memclr8)
MOV r2, #0
@ FIX: "b 1b" jumped into __aeabi_memset4's store loop, skipping its
@ n==0 check, so a zero-length clear still wrote one word.
b __aeabi_memset4
@ void __aeabi_memset(void *dst /*r0*/, size_t n /*r1*/, int c /*r2*/)
@ Byte-wise fill; note the AEABI (dst, n, c) argument order.
@ (Dead code: still inside the disabled #else branch.)
ENTRY(__aeabi_memset)
.fnstart
movs r1, r1
beq 2f
1:
strb r2, [r0], #1
subs r1, #1
bne 1b
2:
bx lr
.fnend
@ void __aeabi_memclr(void *dst /*r0*/, size_t n /*r1*/): zero-fill.
ENTRY(__aeabi_memclr)
MOV r2, #0
b __aeabi_memset
@ 64-bit divide/modulo
@ u64 division via shift-and-subtract long division.
@ In:  r1:r0 = numerator, r3:r2 = denominator.
@ Out: r1:r0 = quotient, r3:r2 = remainder.
@ NOTE(review): DEAD CODE (disabled #else branch) and apparently unfinished:
@ the first loop compares r9:r8, which are never written (the n/2 value is
@ computed into r5:r4 -- and both halves are derived from r0, ignoring r1),
@ and the second loop's "cmp r1,r3 / blt" uses signed compares on unsigned
@ halves. Audit thoroughly before ever enabling this path.
ENTRY(__aeabi_uldivmod)
push {r4-r7,lr}
@ R1:R0 = Numerator
@ R3:R2 = Denominator
@ 1. Find the largest power of two factor of n and d
@let mut add = 1;
mov r6,#1
mov r7,#0
@ -- Save n/2 in r5:r4
lsrs r5, r0,#1
lsr r4, r0,#1
@while n / 2 >= d && add != 0 {
1:
cmp r9,r3
blt 2f
cmp r8,r2
blt 2f
@ d <<= 1;
lsls r2, r2, #1
lsl r3, r3, #1
addcs r3, r3, #1
@ add <<= 1;
lsls r6, r6, #1
lsl r7, r7, #1
addcs r7, r7, #1
bcs _div0
@}
b 1b
2:
@let mut ret = 0;
mov r4,#0
mov r5,#0
@while add > 0 {
1:
movs r7,r7
moveqs r6,r6
beq 3f
@ if n >= d {
cmp r1,r3
blt 2f
cmp r0,r2
blt 2f
@ ret += add;
orr r4, r4,r6
orr r5, r5,r7
@ n -= d;
subs r0,r2
sbc r1,r3
@ }
2:
@ add >>= 1;
lsrs r7, r7, #1
rrx r6, r6
@ d >>= 1;
lsrs r3, r3, #1
rrx r2, r2
@}
b 1b
3:
@ r2:r3 = Remainder (n)
mov r2, r0
mov r3, r1
@ r0:r1 = Quotient (ret)
mov r0, r4
mov r1, r5
_div0:
pop {r4-r7,pc}
@ 64-bit signed divide/modulo
@ Negates operands to non-negative, calls __aeabi_uldivmod, then applies
@ the combined sign to both results. (Dead code: disabled #else branch.)
@ NOTE(review): C semantics give the remainder the *numerator's* sign, but
@ this negates it whenever num/den signs differ -- wrong for e.g. 7 / -2.
@ Verify before enabling.
ENTRY(__aeabi_ldivmod)
push {r4, lr}
mov r4, #0
@ if num < 0 { sign ^= 1; num = -num; }
tst r1,#0x80000000
beq 1f
eor r4, #1
mvn r0,r0
adds r0, #1
mvn r1,r1
adc r1, #0
1:
@ if den < 0 { sign ^= 1; den = -den; }
tst r3,#0x80000000
beq 1f
eor r4, #1
mvn r2,r2
adds r2, #1
mvn r3,r3
adc r3, #0
1:
bl __aeabi_uldivmod
@ if sign != 0 {
movs r4, r4
beq 1f
@ quo = -quo
mvn r0,r0
mvn r1,r1
adds r0, #1
adc r1, #0
@ rem = -rem
mvn r2,r2
mvn r3,r3
adds r2, #1
adc r3, #0
@ }
1:
pop {r4, pc}
@ 32-bit divide/modulo
@ u32 shift-and-subtract division. (Dead code: disabled #else branch.)
@ In:  r0 = numerator, r1 = denominator.
@ Out: r0 = quotient, r1 = remainder.
@ Phase 1 aligns the denominator (r1) and a mask bit (r3/r4) up to the
@ numerator's magnitude; phase 2 subtracts back down accumulating the
@ quotient in r2. ip and lr act as loop-termination sentinels.
ENTRY(__aeabi_uidivmod)
push {r4, lr}
lsr r2, r0, #1
mov ip, #0
mov r4, #1
cmp r2, r1
bcs 1f
mov r3, #1
mov r2, #0
b 2f
1: mov lr, #0
1: lsl r3, r4, #1
lsl r1, r1, #1
cmp lr, r4, lsl #1
beq 1f
mov r4, r3
cmp r2, r1
bcs 1b
1: mov r2, #0
cmp r3, #0
beq 3f
2: cmp r0, r1
lsr r4, r3, #1
subcs r0, r0, r1
addcs r2, r2, r3
cmp ip, r3, lsr #1
lsr r1, r1, #1
mov r3, r4
bne 2b
3:
mov r1, r0 @ R1 is remainder
mov r0, r2 @ R0 is quotient
pop {r4, pc}
@ 32-bit signed divide/modulo
@ Negates operands to non-negative, calls __aeabi_uidivmod, then applies
@ the combined sign to both results. (Dead code: disabled #else branch.)
@ NOTE(review): as with __aeabi_ldivmod above, the remainder should take
@ the numerator's sign under C semantics, not the XORed sign -- verify.
ENTRY(__aeabi_idivmod)
push {r4, lr}
mov r4, #0
@ if num < 0 { sign ^= 1; num = -num; }
tst r0,#0x80000000
beq 1f
eor r4, #1
mvn r0,r0
add r0, #1
1:
@ if den < 0 { sign ^= 1; den = -den; }
tst r1,#0x80000000
beq 1f
eor r4, #1
mvn r1,r1
add r1, #1
1:
bl __aeabi_uidivmod
@ if sign != 0 {
movs r4, r4
beq 1f
@ quo = -quo
mvn r0,r0
add r0, #1
@ rem = -rem
mvn r1,r1
add r1, #1
@ }
1:
pop {r4, pc}
@@ 32-bit unsigned divide
@ENTRY(__aeabi_uidiv)
@ b __aeabi_uidivmod
@@ 32-bit unsigned modulo
@ENTRY(__umoddi3)
@ push {lr}
@ bl __aeabi_uidivmod
@ mov r0, r1
@ pop {pc}
@@ 32-bit signed modulo
@ENTRY(__umodsi3)
@ push {lr}
@ bl __aeabi_idivmod
@ mov r0, r1
@ pop {pc}
#endif
// A, B, num
@ int memcmp(const void *a /*r0*/, const void *b /*r1*/, size_t num /*r2*/)
@ Standard C sign convention: <0 if a<b, 0 if equal, >0 if a>b
@ (now consistent with the amd64 implementation in this crate).
ENTRY(memcmp)
push {r4}
movs r2,r2
mov r3, #0
mov r4, #0
beq 2f
1:
ldrb r3, [r0], #1
ldrb r4, [r1], #1
cmp r4, r3 @ flags = b_byte - a_byte
bne 2f
subs r2, #1
bne 1b
2:
@ FIX: the result sign was inverted (returned +1 for a<b, -1 for a>b).
@ hs includes the equal case, which the trailing moveq overrides to 0.
movhs r0, #-1 @ b > a  => a < b => -1
movlo r0, #1 @ b < a  => a > b => +1
moveq r0, #0
pop {r4}
mov pc, lr
EXIDX memcmp, 0x80A0B0B0
@ EH personality routines: unwinding is unsupported, hang if ever invoked.
ENTRY(__aeabi_unwind_cpp_pr0)
ENTRY_(__aeabi_unwind_cpp_pr1)
b .
@ Soft-float comparison stubs, currently compiled out.
#if 0
TODO_STUB __aeabi_dcmplt
TODO_STUB __aeabi_dcmple
TODO_STUB __aeabi_dcmpeq
TODO_STUB __aeabi_dcmpge
TODO_STUB __aeabi_dcmpgt
TODO_STUB __aeabi_fcmplt
TODO_STUB __aeabi_fcmple
TODO_STUB __aeabi_fcmpeq
TODO_STUB __aeabi_fcmpge
TODO_STUB __aeabi_fcmpgt
b .
#endif
EXIDX __aeabi_unwind_cpp_pr0, 0x1
@ Unwinder resume hook: hang -- there is no unwinder.
ENTRY(_Unwind_Resume)
b .
@ vim: ft=armasm
|
VidyaBipin/rust_os
| 1,436
|
Usermode/rustrt0/amd64.S
|
#include "common.S"
// Process entry point (SysV AMD64, Tifflin userland).
// Calls register_arguments then main(0, 0) through data pointers (keeps
// the relocations simple), and finally issues the CORE_EXIT syscall
// (number from common.S) with main's return value.
.section .text.start
.extern main
.extern register_arguments
.weak start
start:
call *start_ra(%rip)
mov $0, %rdi
mov $0, %rsi
call *start_mn(%rip)
// Save return value for EXITPROCESS call
mov %rax, %rdi
mov $CORE_EXIT, %rax
syscall
ud2
start_ra: .quad register_arguments
start_mn: .quad main
.section .text.memfcns
// void *memset(void *dst /*RDI*/, int c /*RSI*/, size_t n /*RDX*/)
// rep stosb fill; the original DST is parked in RDX (dead after the
// count moves to RCX) so it can be returned per the C contract.
// RDI = Address
// RSI = Value
// RDX = Count
.globl memset
memset:
mov %rsi, %rax
mov %rdx, %rcx
mov %rdi, %rdx // Save RDI
rep stosb
mov %rdx, %rax // because it's the return value
ret
// void *memcpy(void *dst /*RDI*/, const void *src /*RSI*/, size_t n /*RDX*/)
// Uses a qword rep movsq when dst, src and n are all 8-byte aligned
// (only the low 16 bits are tested -- sufficient for the $7 masks),
// otherwise falls back to rep movsb. Returns the original DST.
// RDI = Destination
// RSI = Source
// RDX = Count
.globl memcpy
memcpy:
mov %rdi, %rax // Prepare to return RDI
// Check if a word-wise copy is possible
test $7, %di
jnz 1f
test $7, %si
jnz 1f
test $7, %dx
jnz 1f
mov %rdx, %rcx
shr $3, %rcx
rep movsq
ret
1:
mov %rdx, %rcx
rep movsb
ret
// RDI = Destination
// RSI = Source
// RDX = Count
// void *memmove: overlap-safe copy. Forward memcpy when the regions
// cannot clobber each other, otherwise a backwards rep movsb.
// FIX: the old reverse path compared the wrong registers, clobbered RDX
// (the count) with SRC+N before using it, never advanced RSI to the end
// of the source, and returned with DF still set (ABI requires DF clear).
.globl memmove
memmove:
mov %rdi, %rax // Prepare to return DST
cmp %rsi, %rdi
jb memcpy // DST < SRC: forward copy is safe
mov %rsi, %rcx
add %rdx, %rcx // RCX = SRC + N
cmp %rcx, %rdi
jae memcpy // DST >= SRC+N: no overlap, forward copy
// Overlapping with DST > SRC: copy backwards from the last byte.
lea -1(%rdi,%rdx), %rdi
lea -1(%rsi,%rdx), %rsi
mov %rdx, %rcx
std
rep movsb
cld // SysV ABI: direction flag must be clear on return
ret
// RDI = A
// RSI = B
// RDX = Count
// int memcmp(a, b, n): standard C sign (<0 if a<b, 0 if equal, >0 if a>b).
// FIX: (1) the old ";;" header lines are not comments in x86 GAS (";" is a
// statement separator), so they were fragile; (2) with n == 0, rep cmpsb
// is a no-op and ja/jb then acted on stale flags -- guard n == 0 first.
.globl memcmp
memcmp:
xor %eax, %eax // default result: equal
test %rdx, %rdx
jz 1f // n == 0 => equal
mov %rdx, %rcx
rep cmpsb // flags = [RSI]-[RDI] = B-A at first difference
ja .pos // B > A => a < b => -1 (label names are historical)
jb .neg // B < A => a > b => +1
1:
ret
.pos:
dec %rax // RAX = -1
ret
.neg:
inc %rax // RAX = +1
ret
// Unknown args, don't care
// Unwinder resume hook: unwinding is unsupported, hang if ever reached.
.globl _Unwind_Resume
_Unwind_Resume:
jmp .
.section .rodata
start_msg: .ascii "RustRT0 start"
start_msg_end:
|
VidyaBipin/rust_os
| 1,436
|
Kernel/Core/log_cfg.S
|
#define LEVEL_LOG 5
#define LEVEL_DEBUG 6
#define LEVEL_TRACE 7
#if __SIZEOF_POINTER__ == 8
# define DEF_PTR(...) .quad __VA_ARGS__
# define LOG_CFG_ENT_PAD .long 0
#elif __SIZEOF_POINTER__ == 4
# define DEF_PTR(...) .long __VA_ARGS__
# define LOG_CFG_ENT_PAD
#endif
#define LOG_CFG_ENT(str, level) \
.set log_cfg_count_inner, log_cfg_count_inner + 1 ; \
.section .rodata.logcfg ;\
DEF_PTR(1f) ; \
.short 2f-1f ; \
.short level ; \
LOG_CFG_ENT_PAD ; \
.section .rodata.strings ; \
1: ;\
.ascii str ;\
2: ;\
.section .rodata.logcfg
.set log_cfg_count_inner, 0
.section .rodata.logcfg
.align 8
.globl log_cfg_data
log_cfg_data:
LOG_CFG_ENT("kernel::memory::heap::heapdef", LEVEL_DEBUG) // Heap only prints higher than debug
LOG_CFG_ENT("kernel::memory::phys" , LEVEL_TRACE) // PMM only prints >Trace
//LOG_CFG_ENT("kernel::metadevs::storage", LEVEL_TRACE)
LOG_CFG_ENT("kernel::arch::imp::acpi::internal::shim_out", LEVEL_TRACE) // Shut up the x86 ACPI shim
LOG_CFG_ENT("kernel::async", LEVEL_DEBUG)
LOG_CFG_ENT("kernel::irqs", LEVEL_DEBUG)
LOG_CFG_ENT("kernel::sync::event_channel", LEVEL_DEBUG)
//LOG_CFG_ENT("fs_fat", LEVEL_DEBUG)
//LOG_CFG_ENT("storage_ata::io", LEVEL_DEBUG)
LOG_CFG_ENT("input_ps2::i8042", LEVEL_TRACE)
LOG_CFG_ENT("syscalls::gui_calls", LEVEL_DEBUG)
.globl log_cfg_count
#log_cfg_count: DEF_PTR( (log_cfg_count - log_cfg_data)/(8+4+4) )
log_cfg_count: DEF_PTR( log_cfg_count_inner )
|
VidyaBipin/rust_os
| 8,999
|
Kernel/Core/arch/riscv64/start.S
|
// "Tifflin" Kernel
// - By John Hodge (Mutabah)
//
// Core/arch/riscv64/start.asm
// - RISC-V 64-bit boot shim
// Boot path: save the FDT pointer, zero .bss, point sp at the boot stack,
// enable Sv39 paging via the prebuilt tables below, then "return" into
// kmain through an sret/mret exception frame. Symbols suffixed _PHYS /
// _LOW / _PHYS_PG are physical-address aliases -- presumably produced by
// the linker script (TODO confirm).
.option norvc #
#define KERNEL_BASE 0xFFFFFFFF00000000
#define SBI_BOOT 1
#
# Initial boot
#
.section .text.init
.global start
start:
#if SBI_BOOT
# SBI boot from qemu:
# a0 = HART ID
# a1 = FDT base
mv t5, a0
mv t6, a1
la a0, fdt_phys_addr_PHYS
sd t6, (a0)
#else
csrr t0, mhartid // Read the [Hard]dware [T]hread ID
bnez t0, ap_wait // if non-zero, go into the AP wait
#endif
# -- Clear the BSS
.extern __bss_start
.extern __bss_end
la a0, __bss_start_LOW
la a1, __bss_end_LOW
bgeu a0, a1, 2f
1:
sd zero, (a0)
addi a0, a0, 8
bltu a0, a1, 1b
2:
# -- Prepare the stack
# Virtual top of the 3-page init_stack mapped by boot_pt_lvl1_stacks.
li sp, (0xFFFFFFD800000000 + 4*0x1000)
# -- Set up paging (39 bit, ASID=0)
.extern boot_pt_lvl3_0_PHYS_PG
la a0, boot_pt_lvl3_0_PHYS_PG
li a1, 8
sll a1, a1, 60
or a0, a0, a1 # 8 = 39bit virtual address space (0=48bit)
csrw satp, a0
# -- Prepare an exception frame from which to `mret`
#if SBI_BOOT
# SPP = Supervisor Previous Permission (Set to 1 to stay in supervisor mode)
# SPIE = Supervisor Previous Interurpt Enable (enable interrupts on mret/sret)
li t0, (1 << 18) | (1 << 8) | (1 << 5) # SUM=0, SPP[8]=1, Interrupts on (SPIE[5]=1)
csrw sstatus, t0
#else
li t0, (1 << 18) + (0b01 << 11) + (1 << 7) + (1 << 3) # "Supervisor mode" (MPP[11:12]=1), Interrupts on (MPIE, MIE:3=1)
csrw mstatus, t0
#endif
li t0, 0x203 # Supervisor External (0x200), (Svz timer [0x20] off), Supervisor+User Software (0x3)
csrw sie, t0
la t0, kmain # Common entrypoint, store in `mret`'s target
#if SBI_BOOT
csrw sepc, t0
#else
csrw mepc, t0
#endif
la t0, trap_vector # Supervisor-level error handler
csrw stvec, t0
la t0, HART0_STATE
csrw sscratch, t0
la ra, 1f # Set a return address for `kmain`... just in case
start.pre_kmain: # Symbol for easy breakpoint
#if SBI_BOOT
sret // Supervisor interrupt return
#else
mret // Machine interrupt return
#endif
# Fallback if kmain ever returns: idle forever.
1:
wfi
j 1b
# Secondary harts park here until ap_wait_target is repointed.
ap_wait:
wfi
# Load an address, and jump to that (pre-initialised to just jump back to `ap_wait`)
ld a1, ap_wait_target_ptr
ld a0, (a1)
jr a0
.section .initdata
# > Reference these from `.data` and `.padata`
ap_wait_target_ptr: .quad ap_wait_target - KERNEL_BASE
boot_pt_lvl3_0_PHYS_ptr: .quad boot_pt_lvl3_0 - KERNEL_BASE
#
//
//
//
.section .text
# Unwinder resume hook: unwinding is unsupported in-kernel, hang forever.
.globl _Unwind_Resume
_Unwind_Resume:
j .
# dumpregs op: apply \op (sd to save, ld to restore) to every GPR except
# x0 (zero) and x2 (sp), at fixed negative offsets from sp. The caller is
# responsible for the matching "add sp, sp, -31*8" adjustment and for
# saving/restoring sp itself in the -30*8 slot.
# NOTE(review): the `i=1` parameter is unused -- presumably vestigial.
.macro dumpregs op=sd, i=1
\op x31, -1*8(sp) # T6
\op x30, -2*8(sp) # T5
\op x29, -3*8(sp) # T4
\op x28, -4*8(sp) # T3
\op x27, -5*8(sp) # S11
\op x26, -6*8(sp) # S10
\op x25, -7*8(sp) # S9
\op x24, -8*8(sp) # S8
\op x23, -9*8(sp) # S7
\op x22, -10*8(sp) # S6
\op x21, -11*8(sp) # S5
\op x20, -12*8(sp) # S4
\op x19, -13*8(sp) # S3
\op x18, -14*8(sp) # S2
\op x17, -15*8(sp) # A7
\op x16, -16*8(sp) # A6
\op x15, -17*8(sp) # A5
\op x14, -18*8(sp) # A4
\op x13, -19*8(sp) # A3
\op x12, -20*8(sp) # A2
\op x11, -21*8(sp) # A1
\op x10, -22*8(sp) # A0
\op x9, -23*8(sp) # S1
\op x8, -24*8(sp) # S0
\op x7, -25*8(sp) # T2
\op x6, -26*8(sp) # T1
\op x5, -27*8(sp) # T0
\op x4, -28*8(sp) # TP
\op x3, -29*8(sp) # GP
#\op x2, -30*8(sp) # SP
\op x1, -31*8(sp) # RA
.endm
# Offsets into the per-hart state block pointed at by sscratch.
# NOTE(review): "SSTAUS" looks like a typo for "SSTATUS" -- used
# consistently here, so it is harmless; rename with care if ever shared.
#define SSTAUS_OFS_SCRATCH 0
#define SSTAUS_OFS_SVZ_SP 8
# Supervisor trap entry: saves all GPRs + sstatus/stval/sepc/scause on the
# kernel stack (switching to it if the trap came from usermode), calls
# trap_vector_rs(frame), then restores and sret-s.
.globl trap_vector
.extern trap_vector_rs
trap_vector:
# NOTE: SIE is cleared by the CPU upon entry
# Step 1: Save a scratch register to CPU-local storage
csrrw t0,sscratch,t0 # Swap T0 into sscratch
sd t1, SSTAUS_OFS_SCRATCH(t0) # Store T1 at that address
# Step 2: Determine if the trap as from within the kernel (supervisor mode)
csrr t1, sstatus # Read SStatus
andi t1, t1, 0x100 # Check SPP (bit 8)
bnez t1, .Lkernel_sp # If nonzero (was in kernel), then don't update the stack
# Step 3a: From usermode, load the kernel stack
# Load kernel stack
ld t1, SSTAUS_OFS_SVZ_SP(t0)
sd sp, -30*8(t1) # Save user's SP
mv sp, t1
.Lcommon:
# Restore T1 and T0 now that we can use SP
ld t1, SSTAUS_OFS_SCRATCH(t0)
csrrw t0,sscratch,t0
# Save all GPRs
dumpregs sd
add sp, sp, -31*8
# Save various CSRs
csrr t1, sstatus; sd t1, -4*8(sp)
csrr t0, stval ; sd t0, -3*8(sp)
csrr t0, sepc ; sd t0, -2*8(sp)
csrr t0, scause ; sd t0, -1*8(sp)
add sp, sp, -4*8
# Check top bit of scause, if set also skip setting SIE
# (top bit set => asynchronous interrupt; stay non-reentrant)
srli t0, t0, 63
bnez t0, 1f
# If from userland, set SIE
#and t2, t1, (1<<8)
#beqz t2, 2f
# If SPIE is set, set SIE (now that everything's saved)
and t2, t1, (1<<5)
beqz t2, 1f
# If safe to be reentrant (SPIE set and not servicing an interrupt) then set SIE
2:
csrsi sstatus, 0x2
1:
# Call handler
mv a0, sp
jal trap_vector_rs
add sp, sp, 4*8
# Restore CSRs that it makes sense to have been modified
ld t0, -4*8(sp); csrw sstatus, t0
ld t0, -2*8(sp); csrw sepc, t0
add sp, sp, 31*8
dumpregs ld
ld sp, -30*8(sp) # Also undoes the add
sret
.Lkernel_sp:
# - Separate code block to improve userland->kernel prefetch performance
# In kernel - check for stack overflow (SP == STVAL)
csrr t1, stval
beq sp, t1, trap_vector.stack_overflow
sd sp, -30*8(sp) # SP, not saved by dumpregs
# Jump to common code
j .Lcommon
trap_vector.stack_overflow:
# Kernel faulted with STVAL=SP, assume a stack overflow
# TODO: Switch to a different kernel stack, and enter a kernel panic
j .
#include "../../../../Usermode/rustrt0/riscv64-helpers.S"
#
# See arch::threads::start_thread
#
# First code run by a newly-created thread: pops the (shim, data) pair
# that start_thread pushed onto the new stack and tail-calls the shim.
# The shim never returns; the trailing "j ." catches it if it does.
.global thread_trampoline
thread_trampoline:
ld t0, 0(sp) # pop the body shim
ld a0, 8(sp) # Pop the data pointer
add sp, sp, 16
jalr t0
j .
# Cooperative context switch: saves callee-saved state (ra/gp/tp/s0-s11)
# on the outgoing stack, stores sp through a0, adopts the new sp/satp
# with interrupts masked, then restores the incoming thread's state.
# NOTE(review): a3 (new thread pointer) is documented but never used --
# tp is restored from the saved frame instead; confirm intent. There is
# also no sfence.vma after the satp write -- TODO confirm whether the
# caller performs the fence.
# ARGS:
# - a0: old sp destination
# - a1: new sp
# - a2: new satp
# - a3: new thread pointer
.global task_switch
task_switch:
add sp, sp, -15*8
sd ra, 14*8(sp)
sd gp, 13*8(sp)
sd tp, 12*8(sp)
sd s0, 0*8(sp)
sd s1, 1*8(sp)
sd s2, 2*8(sp)
sd s3, 3*8(sp)
sd s4, 4*8(sp)
sd s5, 5*8(sp)
sd s6, 6*8(sp)
sd s7, 7*8(sp)
sd s8, 8*8(sp)
sd s9, 9*8(sp)
sd s10, 10*8(sp)
sd s11, 11*8(sp)
# Disable interrupts (clear SIE)
csrci sstatus, 0x2
# Save SP, get new SP, switch SATP
sd sp, (a0)
mv sp, a1
csrw satp, a2
# Re-enable interrupts (set SIE)
csrsi sstatus, 0x2
# Pop state
ld ra, 14*8(sp)
ld gp, 13*8(sp)
ld tp, 12*8(sp)
ld s0, 0*8(sp)
ld s1, 1*8(sp)
ld s2, 2*8(sp)
ld s3, 3*8(sp)
ld s4, 4*8(sp)
ld s5, 5*8(sp)
ld s6, 6*8(sp)
ld s7, 7*8(sp)
ld s8, 8*8(sp)
ld s9, 9*8(sp)
ld s10, 10*8(sp)
ld s11, 11*8(sp)
add sp, sp, 15*8
# Return
.globl task_switch.resume
task_switch.resume:
ret
.section .data
.global ap_wait_target
ap_wait_target: .quad ap_wait
.global fdt_phys_addr
fdt_phys_addr: .quad 0
.section .padata
# Three level PTs
# - Top-level covers 512GB, 1GB per slot
# Useful bits:
# - 0: Valid (must be set)
# - 1-3: RWX
# - 4: User (Note: Supervisor can't read these without a flag set)
# - 5: Global
# - 6: Accessed
# - 7: Written
# The *_PHYS_DIV4 symbols are physical addresses pre-shifted for the Sv39
# PPN field -- presumably generated by the linker script (TODO confirm).
.global boot_pt_lvl3_0
boot_pt_lvl3_0:
# NOTE(review): this extern uses the "pt" spelling but the table defined
# (and referenced) below is "boot_lt_lvl2_final" -- the "lt" looks like a
# typo carried consistently through; the extern appears unused. Verify
# against the linker script before renaming anything.
.extern boot_pt_lvl2_stacks_PHYS_DIV4
.extern boot_pt_lvl2_hwmaps_PHYS_DIV4
.extern boot_pt_lvl2_final_PHYS_DIV4
.quad 0
.quad 0
.quad _phys_base_DIV4 + (3 << 6) + (7 << 1) + (1 << 0) # Kernel at 2GB, 1GB map RWX (identity)
.rept 256-3
.quad 0
.endr
# 0x00000040_00000000
# 0xFFFFFFC0_00000000
.rept 128-(8*4)
.quad 0
.endr
# 0xFFFFFFD8_00000000: Stacks base
.quad boot_pt_lvl2_stacks_PHYS_DIV4 + (1 << 0)
.rept (8*4-1)
.quad 0
.endr
# 0xFFFFFFE0_00000000: Stacks end
.rept 128-3
.quad 0
.endr
# 0xFFFFFFFF_40000000
.quad boot_pt_lvl2_hwmaps_PHYS_DIV4 + (1 << 0)
# NOTE: Fractal mapping doesn't work (needs permissions bits on the bottom layer)
.quad _phys_base_DIV4 + (3 << 6) + (7 << 1) + (1 << 0) # Kernel at -2GB, 1GB map RWX
.quad boot_lt_lvl2_final_PHYS_DIV4 + (1 << 0)
# --- Kernel Stacks (prepared with a 3 page boot stack) ---
# 1GB coverage, 2MiB per slot
.global boot_pt_lvl2_stacks
boot_pt_lvl2_stacks:
.extern boot_pt_lvl1_stacks_PHYS_DIV4
.quad boot_pt_lvl1_stacks_PHYS_DIV4 + (1 << 0)
.rept 512-1
.quad 0
.endr
# 2MiB coverage, 4KiB per slot; slot 0 left unmapped as a guard page.
.global boot_pt_lvl1_stacks
boot_pt_lvl1_stacks:
.extern init_stack_PHYS_DIV4
.quad 0
.quad init_stack_PHYS_DIV4 + (0*0x1000/4) + (3 << 1) + (1 << 0)
.quad init_stack_PHYS_DIV4 + (1*0x1000/4) + (3 << 1) + (1 << 0)
.quad init_stack_PHYS_DIV4 + (2*0x1000/4) + (3 << 1) + (1 << 0)
.rept 512-4
.quad 0
.endr
# --- Hardware mappings (prepared with UART) ---
# 1GB coverage, 2MiB per slot
.global boot_pt_lvl2_hwmaps
.extern boot_pt_lvl1_hwmaps_PHYS_DIV4
boot_pt_lvl2_hwmaps:
.quad boot_pt_lvl1_hwmaps_PHYS_DIV4 + (1 << 0)
.rept 512-1
.quad 0
.endr
# 2MiB coverage, 4KiB per slot
.global boot_pt_lvl1_hwmaps
boot_pt_lvl1_hwmaps:
.quad (0x10000000)/4 + (3 << 6)+(3<<1)+(1<<0) # qemu UART
.rept 512-1
.quad 0
.endr
# --- Final table (includes temporary mappings, must always be present) ---
.global boot_lt_lvl2_final
boot_lt_lvl2_final:
.rept 512-2
.quad 0
.endr
.quad boot_pt_lvl1_temp_PHYS_DIV4 + (1 << 0)
.quad 0 # Last 2MiB is unmapped
# @0xFFFFFFFF_FFC00000 : Temporary mappings
.global boot_pt_lvl1_temp
boot_pt_lvl1_temp:
.rept 512
.quad 0
.endr
# 3-page boot stack backing the virtual stack set up in start.
.global init_stack
init_stack:
.space 4096*3, 0
|
VidyaBipin/rust_os
| 10,245
|
Kernel/Core/arch/armv8/start.S
|
//
//
//
// Shared helpers for the AArch64 kernel entry/exception code.
// PUSHA/POPA save and restore the caller-saved GPRs plus SPSR_EL1 and
// ELR_EL1 -- the minimum state for an exception frame. (x18, the platform
// register, is deliberately not included.)
#define KERNEL_BASE 0xFFFF800000000000
#define ENTRY(v) .globl v; .type v,"function"; v:
#define GLOBAL(v) .globl v; v:
#define PUSH(_t1,_t2) stp _t1,_t2, [sp, #-16]!
#define POP(_t1,_t2) ldp _t1,_t2, [sp], #16
#define PUSHA() \
PUSH(x29,x30); /* FP and LR */ \
PUSH(x16,x17); \
PUSH(x14,x15); \
PUSH(x12,x13); \
PUSH(x10,x11); \
PUSH(x8,x9); \
PUSH(x6,x7); \
PUSH(x4,x5); \
PUSH(x2,x3); \
PUSH(x0,x1); \
mrs x0, SPSR_EL1; str x0, [sp, #-8]!; \
mrs x0, ELR_EL1; str x0, [sp, #-8]!
#define POPA() \
ldr x0, [sp], #8; msr ELR_EL1, x0; \
ldr x0, [sp], #8; msr SPSR_EL1, x0; \
POP(x0,x1);\
POP(x2,x3);\
POP(x4,x5);\
POP(x6,x7);\
POP(x8,x9);\
POP(x10,x11);\
POP(x12,x13);\
POP(x14,x15);\
POP(x16,x17);\
POP(x29,x30) // FP and LR
// pad len, sym: fill with "b ." up to len bytes past sym.
// NOTE(review): unused -- the vector table below uses explicit .rept
// blocks instead; presumably kept for future cleanup.
.macro pad len, sym
.rept (len-(.-sym))/4
b .
.endr
.endm
.extern vector_handler_irq
.extern vector_handler_fiq
.extern vector_handler_sync_u64
// AArch64 vector table: 16 entries of 0x80 bytes each (VBAR-aligned).
// Each handler is padded to 0x80 with "b ." traps.
.section VECTORS
// 4x handlers for the current mode (supervisor), but SP_EL0 is active
// -- should never happen (the kernel runs on SP_EL1), so hang.
vector_cur_sp0_sync:
b .
.rept (0x80-(.-vector_cur_sp0_sync))/4
b .
.endr
vector_cur_sp0_irq:
b .
.rept (0x80-(.-vector_cur_sp0_irq))/4
b .
.endr
vector_cur_sp0_fiq:
b .
.rept (0x80-(.-vector_cur_sp0_fiq))/4
b .
.endr
vector_cur_sp0_serror:
b .
.rept (0x80-(.-vector_cur_sp0_serror))/4
b .
.endr
// 4x handlers for the current mode (supervisor)
// Synchronous exception taken from EL1: save state, dispatch to the Rust
// handler, restore, and return.
vector_cur_sync:
PUSHA()
mrs x0, ESR_EL1
mov x1, sp
bl vector_handler_sync_k
POPA()
// FIX: the eret was missing -- after POPA() execution fell into the
// "b ." padding below and hung, even for recoverable kernel faults.
// Every sibling handler in this table ends with eret.
eret
.rept (0x80-(.-vector_cur_sync))/4
b .
.endr
// IRQ taken from EL1: save frame, call the Rust IRQ handler, return.
vector_cur_irq:
PUSHA()
bl vector_handler_irq
POPA()
eret
.rept (0x80-(.-vector_cur_irq))/4
b .
.endr
// FIQ taken from EL1.
vector_cur_fiq:
PUSHA()
bl vector_handler_fiq
POPA()
eret
.rept (0x80-(.-vector_cur_fiq))/4
b .
.endr
// SError from EL1: unrecoverable, hang.
vector_cur_serror:
b .
.rept (0x80-(.-vector_cur_serror))/4
b .
.endr
// 4x handlers for a lower mode (user), AArch64
// Synchronous exception from EL0 (syscalls, faults): dispatch with the
// syndrome in x0 and the saved frame pointer in x1.
vector_lower64_sync:
// Save caller-save state
PUSHA()
mrs x0, ESR_EL1
mov x1, sp
bl vector_handler_sync_u64
POPA()
eret
.rept (0x80-(.-vector_lower64_sync))/4
b .
.endr
vector_lower64_irq:
PUSHA()
bl vector_handler_irq
POPA()
eret
.rept (0x80-(.-vector_lower64_irq))/4
b .
.endr
vector_lower64_fiq:
PUSHA()
bl vector_handler_fiq
POPA()
eret
.rept (0x80-(.-vector_lower64_fiq))/4
b .
.endr
vector_lower64_serror:
b .
.rept (0x80-(.-vector_lower64_serror))/4
b .
.endr
// 4x handlers for a lower mode (user), AArch32
// AArch32 userland is unsupported: sync/serror hang; IRQ/FIQ are still
// serviced so a stray 32-bit task cannot block interrupt delivery.
vector_lower32_sync:
b .
.rept (0x80-(.-vector_lower32_sync))/4
b .
.endr
vector_lower32_irq:
PUSHA()
bl vector_handler_irq
POPA()
eret
.rept (0x80-(.-vector_lower32_irq))/4
b .
.endr
vector_lower32_fiq:
PUSHA()
bl vector_handler_fiq
POPA()
eret
.rept (0x80-(.-vector_lower32_fiq))/4
b .
.endr
vector_lower32_serror:
b .
.rept (0x80-(.-vector_lower32_serror))/4
b .
.endr
//.section .inittext
.section .text
.extern hexdump
.extern kmain
// Kernel entry from the Tifflin loader. Protocol (register-passed):
// x9=UART, x10=FDT, x11=symbol info, x12=RAM end, x13=magic 0x1badb002.
// Derives the physical load base, fixes up the prebuilt page tables,
// enables the MMU (TTBR0 identity + TTBR1 kernel halves), then jumps
// to kmain on the virtual boot stack.
.globl start
start:
ldr w0, =0x1badb002
cmp w0, w13
beq 1f
// TODO: What to do if we weren't loaded by our loader
// - For now, we return
ret
1:
// R9: UART Address
// R10: FDT base address
// R11: Symbol information base
// R12: End of used RAM
// R13: Magic
// 0. Print a '\n' to the serial port
mov w1, #'T' ; str w1, [x9]
mov w1, #'i' ; str w1, [x9]
mov w1, #'f' ; str w1, [x9]
mov w1, #'f' ; str w1, [x9]
mov w1, #'l' ; str w1, [x9]
mov w1, #'i' ; str w1, [x9]
mov w1, #'n' ; str w1, [x9]
mov w1, #'\n'; str w1, [x9]
// To get RAM start: subtract linked address of current instruction from real address
ldr x0, =(1f-KERNEL_BASE)
bl 1f
1:
sub x8, x30, x0 // x8 = physical load base (link-time delta)
// Save bootloader information in various globals
ldr x0, =(kernel_phys_start - KERNEL_BASE)
add x0,x0, x8
str x8, [x0]
ldr x0, =(dt_phys_base - KERNEL_BASE)
add x0,x0, x8
str x10, [x0]
ldr x0, =(symbol_info_phys - KERNEL_BASE)
add x0,x0, x8
str x11, [x0]
ldr x0, =(ram_first_free - KERNEL_BASE)
add x0,x0, x8
str x12, [x0]
mov x12, x8
prep_page_tables:
ldr x0, =(kernel_root-KERNEL_BASE)
add x0, x0, x12
// - Mutate all populated fields in the tables - offset by RAM base
ldr x4, =(kernel_maps_start-KERNEL_BASE)
add x4, x4, x12
ldr x5, =kernel_maps_len
1:
ldr x3, [x4], #8
cmp x3, #0
beq 2f
add x3, x3, x12
str x3, [x4, #-8]
2:
subs x5, x5, #8
bne 1b
// Create an identity mapping before enabling paging
orr x1, x12, #0x001 // R1 = R12 | (1<<10) | 1 (Valid, AF, Block, Kernel RWX)
orr x1, x1, #0x400
lsr x2, x12, #25 // R2 = (R12 / 32MB)
// - Split x2 into two values, indexing Lv1 and Lvl2
lsr x6, x2, #11
and x6, x6, #0x7FF
and x2, x2, #0x7FF
// - Set entries in the top and next level of the low mappings
ldr x3, =(user0_root-KERNEL_BASE) ; add x3, x3, x12
ldr x4, =(user0_tab2-KERNEL_BASE) ; add x4, x4, x12
add x5, x4, #0x403 // Valid, Table, Kernel RWX
str x5, [x3,x6,LSL 3]
str x1, [x4,x2,LSL 3]
// NOTE: x2 kept for clearing after vmsa_setup
// X0: Physical address of kernel_root
// X3: Physical address of user0_root
vmsa_setup:
msr TTBR0_EL1, x3
msr TTBR1_EL1, x0
// Translation Control Register
// 34:32 - IPS = 5 (48 bits)
// 31:30 - TG1 = 1 (16KB Pages for TTBR1)
// 29:28 - SH1 = 0 (Non-sharable)
// 27:26 - ORGN1 = 0
// 25:24 - IRGN1 = 0
// 23 - EPD1 = 0 (enabled)
// 22 - A1 = 0 (TTBR0 gives ASID)
// 21:16 - T1SZ = 17 (47 bits)
// 15:14 - TG0 = 2 (16KB pages for TTBR0)
// 13:12 - SH0 = 0 (non-sharable)
// 11:10 - ORGN0 = 0
// 9: 8 - IRGN0 = 0
// 7 - EPD0 = 0 (enabled)
// 5: 0 - T0SZ = 17 (47 bits)
ldr x1, =0x540118011
msr TCR_EL1, x1
isb
// Populate the first HWMapping address with the UART's base
orr x0, x9, #0x3
orr x0, x0, #0x400
ldr x1, =(kernel_hwmap_level3+0 - KERNEL_BASE)
add x1, x1, x12
str x0, [x1]
// Virtual top of the init stack (mapped by kernel_stack_level3 slot 1).
ldr x0, =0xFFFFFFB000000000 + 2*0x4000
mov sp, x0
ldr x0, =kmain
ldr x1, =CPU0_STATE
msr TPIDR_EL1, x1
// 4 - SA0 = 1 (SP alignment check)
// 3 - SA = 1 (SP alignment check)
// 2 - C = 0
// 1 - A = 1 (Alignment check on)
// 0 - M = 1 (MMU on)
ldr x1, =0x1B
msr SCTLR_EL1, x1
isb
// --- Virtual memory is now enabled! ---
// Clear the identity mapping in user0_tab2 (x2 is still the index)
ldr x4, =user0_tab2
mov x1, 0
str x1, [x4, x2, LSL 3]
ldr x1, =(vector_cur_sp0_sync)
msr VBAR_EL1, x1
mov x29, #0 // Clear FP so kernel backtrace code knows to terminate cleanly
br x0
.section .text
// First code run by a new thread: pops the (closure ptr, shim fn) pair
// pushed by the thread-start code and tail-calls the shim; never returns.
ENTRY(thread_trampoline)
//.fnstart
//.cantunwind
POP(x1, x0) // X1: "thread_root" (generic over closure type), X0: Pop pointer to the closure
br x1
//.fnend
// pub fn task_switch(old_stack: &mut usize, new_stack: usize, new_ttbr0: usize);
// R0: Old stack save location
// R1: New stack
// R2: New TTBR0
// Cooperative context switch: saves AAPCS64 callee-saved regs plus the
// user-visible state (SP_EL0, TPIDR_EL0, ELR_EL1), stores sp via x0,
// swaps TTBR0 (with an ASID TLB flush), then restores the incoming
// thread's frame and returns into it.
ENTRY(task_switch)
//.fnstart
//.cantunwind
// Save callee-save state (19-30)
PUSH(x19, x20)
PUSH(x21, x22)
PUSH(x23, x24)
PUSH(x25, x26)
PUSH(x27, x28)
PUSH(x29, x30)
// Save user state (User SP and ELR)
mrs x5, SP_EL0
mrs x6, TPIDR_EL0
PUSH(x5, x6)
// Save ELR (exception return)
mrs x5, ELR_EL1
PUSH(x5, x6) // x6 pushed only to keep 16-byte stack alignment
// Save SP to provided location
mov x4, sp
str x4, [x0]
// Update VMM root
msr TTBR0_EL1, x2
ldr x0, =(0 << 48)
tlbi ASIDE1, x0 // TODO: Check if this ignores globals?
// Set new SP
mov sp, x1
// Restore ELR
POP(x5,x6)
msr ELR_EL1, x5
// Restore user
POP(x5,x6)
msr SP_EL0, x5
msr TPIDR_EL0, x6
// Restore callee state
POP(x29,x30)
POP(x27,x28)
POP(x25,x26)
POP(x23,x24)
POP(x21,x22)
POP(x19,x20)
ret
//.fnend
//@ pub fn drop_to_user(entry: usize, stack: usize, cmdline_len: usize) -> !;
//@ R0: entry
//@ R1: stack
//@ R2: cmdline_len
// First entry into EL0: sets SP_EL0 (via SPSel banking), primes ELR/SPSR
// for an eret to the user entry point with x0 = command-line length.
// SPSR = 0 selects EL0t with all interrupt masks clear.
ENTRY(drop_to_user)
//.fnstart
//.cantunwind
msr SPSel, #0
mov sp, x1
msr SPSel, #1
msr ELR_EL1, x0
mov x1, #0 // SPSR initialisation
msr SPSR_EL1, x1
mov x0, x2 // Set R0 = commandline length
eret
//.fnend
// Shared memcpy/memset/memcmp helpers (same file as userland rt0).
#include "../../../../Usermode/rustrt0/armv8-helpers.S"
// EH personality routines: unwinding unsupported, hang if reached.
ENTRY(__aeabi_unwind_cpp_pr0)
ENTRY(__aeabi_unwind_cpp_pr1)
b .
.section .rodata
// Message fragments used by the (extern) data-abort dump path.
data_abort_message: .ascii "Data Abort: "
data_abort_message_end:
data_abort_message2: .ascii "\n"
data_abort_message2_end:
// Boot-information globals filled in by start before the MMU is enabled.
.section .data
GLOBAL(dt_phys_base) .quad 0 // (Firmware) Device Tree base location
GLOBAL(kernel_phys_start).quad 0 // Start of kernel in RAM
GLOBAL(ram_first_free) .quad 0
GLOBAL(symbol_info_phys).quad 0
.section .bss
// Dedicated stack for abort handling (grows down from abort_stack).
.space 0x1000, 0
abort_stack:
// Page-aligned zero-fill data (mapped by the boot page tables).
.section .pabss, "aw", @nobits
init_stack_base:
.space 0x4000, 0 // 1 page = 16K
.globl user0_tab2
user0_tab2:
.space 0x4000, 0
.globl kernel_temp_mappings
kernel_temp_mappings:
.space 0x4000, 0
// Page Aligned data
// Prebuilt boot page tables (16KB granule, 48-bit VA). Entries hold
// link-time offsets; start's prep_page_tables loop rebases every
// non-zero entry by the physical load address. 0x403 = Valid|Table|AF.
.section .padata
// - Top level table (lvl1), 16KB (one page), covering 2048 entries of 64GB each
// > 47 bits total
.globl kernel_maps_start
kernel_maps_start:
.globl user0_root
user0_root:
.rept 2048-1
.quad 0
.endr
.quad (user0_root-KERNEL_BASE)+0x403 // 0x0000_7FF0_0000_0000 - Fractal
.globl kernel_root
kernel_root:
.quad (kernel_image_level2-KERNEL_BASE)+0x403
.rept 2048-1-5
.quad 0
.endr
.quad (kernel_stack_level2-KERNEL_BASE)+0x403 // 0xFFFF_FFB0_0000_0000 - Stacks
.quad (kernel_hwmap_level2-KERNEL_BASE)+0x403 // 0xFFFF_FFC0_0000_0000 - Hardware
.quad 0 // 0xFFFF_FFD0_0000_0000 - UNUSED
.quad (kernel_root-KERNEL_BASE)+0x403 // 0xFFFF_FFE0_0000_0000 - Fractal
.quad (kernel_temp_level2-KERNEL_BASE)+0x403 // 0xFFFF_FFF0_0000_0000 - Temp
// - Level 2 table for kernel image, 32MB per entry
kernel_image_level2:
.quad 0+0x401 // Kernel image "identity" map, Priv RW only
.rept 2048-1
.quad 0
.endr
// - Level 2 table for the kernel stack
kernel_stack_level2:
.quad (kernel_stack_level3-KERNEL_BASE)+0x403
.rept 2048-1
.quad 0
.endr
// - Level 2 table for hardware mappings
kernel_hwmap_level2:
.quad (kernel_hwmap_level3-KERNEL_BASE)+0x403
.rept 2048-1
.quad 0
.endr
// - Level 2 table for temporary mappings
kernel_temp_level2:
.quad (kernel_temp_mappings-KERNEL_BASE)+0x403 // 0xFFFF_FFF0_0000_0000
.rept 2048-1 // 0xFFFF_FFF0_0200_0000+ - Unused
.quad 0
.endr
// - Level 3 table for kernel stacks (slot 0 left empty as a guard page)
kernel_stack_level3:
.quad 0
.quad (init_stack_base-KERNEL_BASE)+0*0x4000+0x403
.rept 2048-2
.quad 0
.endr
// - Level 3 table for hardware mappings (slot 0 filled with the UART by start)
.globl kernel_hwmap_level3
kernel_hwmap_level3:
.rept 2048
.quad 0
.endr
.globl kernel_maps_end
kernel_maps_end:
// vim: ft=armasm
|
VidyaBipin/rust_os
| 12,064
|
Kernel/Core/arch/armv7/start.S
|
@
@
@
#define KERNEL_BASE 0x80000000
#define ENTRY(v) .globl v; .type v,"function"; v:
#define GLOBAL(v) .globl v; v:
#if 1 || defined(PLATFORM_qemuvirt)
# define UART_BASE 0x09000000
# define RAM_START 0x40000000
#elif defined(PLATFORM_realviewpb)
# define UART_BASE 0x10009000
# define RAM_START 0x00000000
#endif
// Exception vector table (one instruction per slot).
.section VECTORS
ivt_reset: b rst_start @ 0x00 Reset
ivt_undef: ldr pc, =ud_abort @ 0x04 #UD
ivt_svc: ldr pc, =svc_handler @ 0x08 SVC (used to be called SWI)
ivt_prefetch: ldr pc, =prefetch_abort @ 0x0C Prefetch abort
ivt_data: ldr pc, =data_abort @ 0x10 Data abort
ivt_unused: b . @ 0x14 Not Used
ivt_irq: ldr pc, =irq_handler @ 0x18 IRQ
ivt_fiq: ldr pc, =fiq_handler @ 0x1C FIQ (Fast interrupt)
rst_start:
ldr pc, = start-KERNEL_BASE @ jump to the physical (pre-MMU) address of `start`
//.section .inittext
.section .text
.extern hexdump
.extern kmain
// Kernel entry, jumped to by the bootloader with:
// R9=UART base, R10=FDT, R11=symbol info, R12=end of used RAM, R13=magic.
.globl start
start:
ldr r0, =0x1badb002
teq r0, r13 @ validate the magic the bootloader left in SP
beq 1f
@ TODO: What to do if we weren't loaded by our loader
@ - For now, we return
mov pc,lr
b .
1:
@ R9: UART Address
@ R10: FDT base address
@ R11: Symbol information base
@ R12: End of used RAM
@ R13: Magic
// 0. Print a banner ("Tifflin\n") to the serial port
mov r1, #'T' ; str r1, [r9]
mov r1, #'i' ; str r1, [r9]
mov r1, #'f' ; str r1, [r9]
mov r1, #'f' ; str r1, [r9]
mov r1, #'l' ; str r1, [r9]
mov r1, #'i' ; str r1, [r9]
mov r1, #'n' ; str r1, [r9]
mov r1, #10 ; str r1, [r9]
// To get RAM start: subtract linked address of current instruction from real address
ldr r0, =(get_ram_base-0x80000000+4)
sub r8, pc, r0 @ pc reads as this insn+8, so r8 = physical RAM base
get_ram_base:
// Stash the handoff registers into the kernel globals, addressing them
// physically (link address - KERNEL_BASE + RAM base in r8).
ldr r0, =(kernel_phys_start - KERNEL_BASE)
add r0, r8
str r8, [r0]
ldr r0, =(dt_phys_base - KERNEL_BASE)
add r0, r8
str r10, [r0]
ldr r0, =(symbol_info_phys - KERNEL_BASE)
add r0, r8
str r11, [r0]
ldr r0, =(ram_first_free - KERNEL_BASE)
add r0, r8
str r12, [r0]
mov r12, r8 @ r12 = RAM base for the paging setup below
// Rebase the statically-built page tables to physical RAM, enable the
// MMU, install exception vectors/stacks, and enter Rust (`kmain`).
prep_page_tables:
// 1. Prepare VMSA State
ldr r0, =(kernel_table0-KERNEL_BASE)
add r0, r12 @ r0 = physical address of the initial L1 table
// - Prepare page tables (offset with RAM base)
// Every non-zero entry holds a link-relative address; add the RAM base.
mov r4, r0
ldr r5, =kernel_maps_len @ region length in bytes (linker-provided)
1:
ldr r3, [r4]
cmp r3, #0 @ 0x1000
beq 2f
add r3, r12
2:
str r3, [r4], #4
subs r5, #4 @ r5 counts remaining bytes
bne 1b
// Map the 1MiB section containing the RAM base so execution can
// continue at the physical PC once the MMU turns on.
orr r1, r12, #0x400
orr r1, r1, #0x002 @ section descriptor, kernel RW
lsr r2, r12, #20
lsl r2, r2, #2 @ r2 = (RAM base >> 20) * 4 = L1 table byte offset
ldr r3, [r0,r2]
cmp r3, #0
bne ram_mapping_collision
str r1, [r0,r2]
vmsa_setup:
mcr p15, 0, r0, c2, c0, 1 @ Set TTBR1 to r0
mcr p15, 0, r0, c2, c0, 0 @ Set TTBR0 to r0 too (for identity)
mov r0, #1
mcr p15, 0, r0, c2, c0, 2 @ Set TTCR to 1 (50/50 split)
mov r0, #3
mcr p15, 0, r0, c3, c0, 0 @ Set Domain 0 to Manager
@ Enable VMSA
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #1 @ SCTLR.M (MMU enable)
orr r0, r0, #1 << 23 @ SCTLR bit 23 (XP / VMSAv6 tables, presumably - confirm)
mcr p15, 0, r0, c1, c0, 0
mov r0, #1
mcr p15, 0, r0, c13, c0, 1 @ HACK: Set ASID to non zero
mov r0, #0x55 @ 01010101b
mcr p15, 0, r0, c3, c0, 0 @ Enable access faults on domains 0 & 1
// NOTE: VMSA is active here, so virtual addresses can be used
@
@ Check for security extensions
@
mrc p15, 0, r0, c0, c1, 1
and r0, #0xF0
beq 1f
@ - Present
ldr r0,=0xFFFF0000
mcr p15, 0, r0, c12, c0, 0 @ Set the VBAR (brings exceptions into high memory)
b 2f
1:
@ - Absent
mrc p15, 0, r0, c1, c0, 0 @ Set SCTLR.V
orr r0, #0x2000
mcr p15, 0, r0, c1, c0, 0
2:
@ Populate the first HWMapping address with the UART's base
add r0, r9, #0x13 @ +0x13 = small-page descriptor flags
ldr r1, =hwmap_table_0+0
str r0, [r1]
// Give abort mode and supervisor mode their stacks, then enter Rust.
cps #23 @ Switch to 'abort' mode
ldr sp, =abort_stack
cps #19 @ Back to supervisor
ldr sp, =init_stack
ldr pc, =kmain
@ If the start of RAM fell on an occupied section of the virtual address space
ram_mapping_collision:
mov r0, #'R'; str r0, [r9]
mov r0, #'A'; str r0, [r9]
mov r0, #'M'; str r0, [r9]
mov r0, #'!'; str r0, [r9]
mov r0, #'\n'; str r0, [r9]
b .
@
@ MAGIC MACRO
@
@ TODO: use https://sourceware.org/binutils/docs/as/ARM-Directives.html
@ EXIDX method handle: emits an ARM.exidx unwind-table entry for
@ `method` with the given unwind opcode word/handle.
.macro EXIDX method handle
.long EXIDX_\method
.section .ARM.exidx.\method, #exidx
.globl EXIDX_\method
EXIDX_\method: .long \method - . - 0x80000000, \handle
.section .text
.endm
.section .text
// Entry shim for new kernel threads: the scheduler seeds the stack
// with [thread_root fn][closure ptr]; pop both and tail-call.
ENTRY(thread_trampoline)
.fnstart
.cantunwind
pop {r1} @ "thread_root" (generic over closure type)
pop {r0} @ Pop pointer to the closure
bx r1
.fnend
@ R0: Old stack save location
@ R1: New stack
@ R2: New TTBR0
@ R3: New Thread pointer
@ Cooperative context switch: saves r4-r12+lr and the System-mode
@ (user-banked) SP/LR, swaps kernel stacks, optionally switches the
@ user address space, and restores the same frame from the new stack.
ENTRY(task_switch)
.fnstart
.cantunwind
push {r4-r12,lr}
mov r4, sp
cps #0x1F @ System mode shares the user-mode SP/LR banks
stmfd r4!, {sp,lr}
cps #0x13
@ Save SP
str r4, [r0]
@ Only update TTBR0 if the task has an explicit address space
movs r2, r2 @ sets Z from r2; the mov/mcr below leave flags untouched
mcrne p15,0, r2, c2,c0,0 @ Set TTBR0 to r2
mov r2, #1
mcrne p15,0, r2, c8,c7,0 @ TLBIALL - Invalid user space
@ Set new thread pointer
mcr p15, 0, r3, c13,c0,4 @ TPIDRPRW
@ Set new SP
mov r4, r1
cps #0x1F
ldmfd r4!, {sp,lr}
cps #0x13
mov sp, r4
@ Restore state
pop {r4-r12,pc}
.fnend
@ pub fn drop_to_user(entry: usize, stack: usize, cmdline_len: usize) -> !;
@ R0: entry
@ R1: stack
@ R2: cmdline_len
@ First entry to user mode: sets the user SP via System mode, then
@ RFEs through a constructed [pc, cpsr] pair (mode 0x10 = user).
ENTRY(drop_to_user)
.fnstart
.cantunwind
cps #0x1F @ Switch to "System" to set user SP
mov sp, r1
cps #0x13 @ 0x13 = supervisor (kernel)
mov r1, #0x10 @ - user-mode CPSR value
push {r0,r1} @ Push user entrypoint and operating mode (r1)
mov r0, r2 @ Set R0 = commandline length
rfefd sp!
.fnend
// Prefetch-abort (instruction fault) entry: builds a register frame on
// the abort-mode stack and calls into Rust.
ENTRY(prefetch_abort)
srsfd sp!, #0x17 @ Save state, using 'abort' mode stack
push {r0-r12} @ SP, LR, and PC not pushed
bl get_abort_sp_lr
push {r1,r2} @ Source SP and LR
ldr r0, [sp, #4*(2+13)] @ aborting PC (saved LR_abt) from the SRS frame
sub r0, #8 @ NOTE(review): prefetch aborts architecturally save PC+4; -8 looks off - confirm
mov r1, sp
mrc p15,0, r2, c5,c0,1 @ IFSR (fault status)
bl prefetch_abort_handler
add sp, #8 @ Undo saving of SP/LR to stack
pop {r0-r12}
rfefd sp!
.long EXIDX_prefetch_abort
EXIDX prefetch_abort, prefetch_abort_EXTAB - . - 0x80000000
.section .ARM.extab.prefetch_abort
GLOBAL(prefetch_abort_EXTAB)
.long 0x81028600 @ POP {SP, LR}
.long 0x81FFB10F @ POP {r4-r12}, POP {r0-r3}
.long 0x02B0B0B0 @ VSP+=12, END
.section .text
// Data-abort entry: same frame shape; passes DFAR/DFSR to Rust.
ENTRY(data_abort)
.fnstart
.cantunwind
srsfd sp!, #0x17 @ Save state, using 'abort' mode stack
push {r0-r12} @ SP, LR, and PC not pushed
mov r3, lr @ NOTE(review): dead store - r3 is overwritten by the DFSR read below
bl get_abort_sp_lr
push {r1,r2} @ Source SP and LR
ldr r0, [sp, #4*(2+13)] @ aborting PC from the SRS frame
sub r0, #8 @ data aborts save PC+8
mov r1, sp
mrc p15,0, r2, c6,c0,0 @ DFAR (faulting address)
mrc p15,0, r3, c5,c0,0 @ DFSR (fault status)
bl data_abort_handler
add sp, #8 @ Undo saving of SP/LR to stack
pop {r0-r12}
rfefd sp!
.fnend
// Undefined-instruction entry (taken in UND mode, but the frame is
// built on the abort-mode stack to avoid needing yet another stack).
ENTRY(ud_abort)
.fnstart
.cantunwind
srsfd sp!, #0x17 @ Save state, using 'abort' mode stack ([0]=LR_curr, [1]=SPSR)
cps #0x17 @ UD Abort is mode 0x1b, but that's an extra stack...
push {r0-r12} @ SP, LR, and PC not pushed
bl get_abort_sp_lr
push {r1,r2} @ Source SP and LR
ldr r0, [sp, #4*(2+13)]
sub r0, #4 @ UND saves PC+4
mov r1, sp
bl ud_abort_handler
add sp, #8 @ Undo saving of SP/LR to stack
pop {r0-r12}
rfefd sp!
.fnend
// Helper: returns r1/r2 = SP/LR of the interrupted mode, decoded from
// the SPSR stored in the caller's SRS frame. Hangs (label 3) on an
// unexpected source mode. Called from abort mode; returns in abort mode.
get_abort_sp_lr:
ldr r0, [sp, #4*(13+1)] @ 13 GPRs, LR, [SPSR]
and r0, #0x1F @ source mode bits
cmp r0, #0x10 @ 0x10 = user
cmpne r0, #0x13 @
cmpne r0, #0x17 @
bne 3f
cmp r0, #0x10 @ 0x10 = user
bne 1f
cps #0x1F @ 0x1F = "System" (user regs, kernel privs)
mov r1, sp
mov r2, lr
b 2f
1:
cmp r0, #0x17 @ 0x17 = abort
bne 1f
sub r1, sp, #4*(13+2) @ abort SP before the frame was pushed... confirm sign/offset
ldr r2, [sp, #13*4] @ saved LR from the frame... confirm offset
b 2f
1:
cps #0x13 @ 0x13 = supervisor (kernel)
mov r1, sp
mov r2, lr
@b 2f
2:
cps #23 @ Switch back to abort mode
bx lr
3:
b .
// SVC (syscall) entry: R12 carries the call ID, r0-r5 the arguments.
ENTRY(svc_handler)
.fnstart
.cantunwind
srsfd sp!, #0x13 @ Save state to stack
push {r0-r5} @ the 6 argument registers become an array on the stack
mov r0, r12 @ R12 is the call ID
mov r1, sp @ Args have been pushed to kernel stack
mov r2, #6 @ 6 of them
bl syscalls_handler
@ r0,r1 return value
add sp, #6*4 @ drop the argument array (r0/r1 carry the return)
rfefd sp!
.fnend
// IRQ entry: saves the full frame (including the user-banked SP/LR)
// and calls the Rust interrupt dispatcher with a pointer to it.
ENTRY(irq_handler)
.fnstart
.cantunwind
sub lr, #4 @ Adjust LR to the correct value
srsfd sp!, #0x13 @ Save state to supervisor stack
cps #0x13 @ TODO: Check if IRQs are still off after this
push {r0-r12} @ Save GPRs
push {r0,r14} @ Save kernel LR (and r0 again for alignment)
sub sp, #8
stm sp, {sp,lr}^ @ Save userland banked registers (can't write back)
mov r0, sp
bl interrupt_handler
ldmfd sp, {sp,lr}^ @ Note: doesn't support write-back
add sp, #8
pop {r0,r14}
pop {r0-r12}
rfefd sp!
.fnend
// FIQ entry: minimal frame, same dispatcher.
ENTRY(fiq_handler)
.fnstart
.cantunwind
srsfd sp!, #0x13 @ Save state to supervisor stack
cps #0x13
push {r0-r12}
mov r0, sp
bl interrupt_handler
pop {r0-r12}
rfefd sp!
.fnend
@ void* memcpy(void* dst, const void* src, size_t n)
@ In:  r0 = dst, r1 = src, r2 = byte count.  Out: r0 = dst.
@ Fixes: the loop terminated with BGT (signed), so counts with the top
@ bit set copied only one byte; and r0 was advanced past the end even
@ though ISO C requires memcpy to return the original destination.
ENTRY(memcpy)
	mov ip, r0 @ keep dst for the return value; write through ip (caller-saved)
	movs r2, r2
	bxeq lr @ n == 0: nothing to do
1:
	LDRB r3, [r1], #1
	STRB r3, [ip], #1
	SUBS r2, r2, #1
	BNE 1b @ unsigned count: loop until exactly zero
	BX lr
EXIDX memcpy, 0x80B0B0B0
@ void* memmove(void* dst, const void* src, size_t n)
@ Overlap-safe copy: delegates to __aeabi_memcpy when the regions
@ cannot overlap destructively, otherwise copies backwards (the reverse
@ loop ends with r0 back at dst, satisfying the return contract).
@ Fixes: the pointer/count comparisons used signed conditions
@ (blt/bgt), which misbehave for addresses or sizes with the top bit
@ set; pointers must be compared unsigned (blo/bhi/bne).
ENTRY(memmove)
	movs r2, r2
	bxeq lr @ n == 0: nothing to do
	cmp r0,r1
	blo __aeabi_memcpy @ DST < SRC (unsigned): forward copy is safe
	adds r3,r1,r2
	cmp r0,r3
	bhi __aeabi_memcpy @ DST > SRC+NUM (unsigned): regions disjoint
	add r0,r2 @ point both cursors one past the end
	add r1,r2
1: @ Reverse copy
	LDRB r3, [r1,#-1]!
	STRB r3, [r0,#-1]!
	SUBS r2, r2, #1
	BNE 1b @ unsigned count: loop until exactly zero
	BX lr
EXIDX memmove, 0x80B0B0B0
@ void* memset(void* dst, int c, size_t n)
@ Fix: r0 was post-incremented past the end of the buffer and then
@ returned; ISO C requires memset to return the original destination,
@ so write through r3 and leave r0 untouched.
ENTRY(memset)
.fnstart
	mov r3, r0 @ preserve dst for the return value
	movs r2, r2
	beq 2f
1:
	strb r1, [r3], #1
	subs r2, #1
	bne 1b
2:
	bx lr
.fnend
@ int memcmp(const void* a, const void* b, size_t n)
@ In: r0 = a, r1 = b, r2 = n.  Out: r0 = -1 / 0 / 1 per ISO C ordering.
@ Fix: the byte comparison was `cmp r4, r3` (computes B - A), which
@ inverted the sign of the result; memcmp must return positive when
@ the first differing byte of A is greater.
ENTRY(memcmp)
.fnstart
.save {r4}
	push {r4}
	movs r2,r2
	mov r3, #0
	mov r4, #0
	beq 2f @ n == 0: fall through, moveq below forces 0
1:
	ldrb r3, [r0], #1
	ldrb r4, [r1], #1
	cmp r3, r4 @ flags = A-byte minus B-byte (unsigned)
	bne 2f
	subs r2, #1
	bne 1b
2:
	movhs r0, #1 @ A > B (the moveq below overrides the equal case)
	movlo r0, #-1 @ A < B
	moveq r0, #0
	pop {r4}
	mov pc, lr
.fnend
// Stub ARM EH personality routines - this kernel never unwinds.
ENTRY(__aeabi_unwind_cpp_pr0)
ENTRY(__aeabi_unwind_cpp_pr1)
b .
#if 0
ENTRY(__aeabi_dcmplt)
ENTRY(__aeabi_dcmple)
ENTRY(__aeabi_dcmpeq)
ENTRY(__aeabi_dcmpge)
ENTRY(__aeabi_dcmpgt)
b .
ENTRY(__aeabi_fcmplt)
ENTRY(__aeabi_fcmple)
ENTRY(__aeabi_fcmpeq)
ENTRY(__aeabi_fcmpge)
ENTRY(__aeabi_fcmpgt)
b .
#endif
EXIDX __aeabi_unwind_cpp_pr0, 0x1
ENTRY(_Unwind_Resume)
b .
.section .rodata
data_abort_message: .ascii "Data Abort: "
data_abort_message_end:
data_abort_message2: .ascii "\n"
data_abort_message2_end:
.section .data
// Boot handoff values, stored by `start` (physical addresses).
GLOBAL(dt_phys_base) .long 0 @ (Firmware) Device Tree base location
GLOBAL(kernel_phys_start).long 0 @ Start of kernel in RAM
GLOBAL(ram_first_free) .long 0
GLOBAL(symbol_info_phys) .long 0
.section .bss
init_stack_base:
.space 0x20000, 0 @ 128 KiB boot stack (init_stack marks the top)
init_stack:
.space 0x1000, 0 @ 4 KiB abort-mode stack (abort_stack marks the top)
abort_stack:
// Page Aligned data
.section .padata
// Initial first-level translation table. Non-zero entries hold
// link-relative (KERNEL_BASE-based) addresses plus descriptor flags;
// `prep_page_tables` rebases them to physical RAM at boot.
.globl kernel_table0
kernel_table0:
.long 0x00000402 @ Identity map the first 1 MiB
.rept 0x800 - 1 - 8
.long 0
.endr
.long user_last_map - KERNEL_BASE + 0x0000 + 1
.long user_last_map - KERNEL_BASE + 0x0400 + 1
.long user_last_map - KERNEL_BASE + 0x0800 + 1
.long user_last_map - KERNEL_BASE + 0x0C00 + 1
.long user_last_map - KERNEL_BASE + 0x1000 + 1
.long user_last_map - KERNEL_BASE + 0x1400 + 1
.long user_last_map - KERNEL_BASE + 0x1800 + 1
.long user_last_map - KERNEL_BASE + 0x1C00 + 1
@ 0x80000000 - User/Kernel split
.long 0x00000000 + 0x402 @ Map first 8 MiB to 2GiB (KRW only)
.long 0x00100000 + 0x402 @
.long 0x00200000 + 0x402 @
.long 0x00300000 + 0x402 @
.long 0x00400000 + 0x402 @
.long 0x00500000 + 0x402 @
.long 0x00600000 + 0x402 @
.long 0x00700000 + 0x402 @
.rept 0xF00 - 0x800 - 8
.long 0
.endr
@ - 0xF00_00000
.rept 16
.long 0
.endr
@ - 0xF10_00000
.long hwmap_table_0 - KERNEL_BASE + 0x000 + 1
.long hwmap_table_0 - KERNEL_BASE + 0x400 + 1
.long hwmap_table_0 - KERNEL_BASE + 0x800 + 1
.long hwmap_table_0 - KERNEL_BASE + 0xC00 + 1
@ - 0xF14_00000
.rept 0xFF8 - 0xF00 - 16 - 4
.long 0
.endr
@ Page fractals and vectored exceptions
.long 0, 0, 0, 0
.long kernel_exception_map - KERNEL_BASE + 0x000 + 1
.long kernel_exception_map - KERNEL_BASE + 0x400 + 1
.long kernel_exception_map - KERNEL_BASE + 0x800 + 1
.long kernel_exception_map - KERNEL_BASE + 0xC00 + 1
// Second-level table backing the final user-space 4MiB window.
user_last_map:
.rept 1024
.long 0
.endr
@ - 4MB for the kernel's use, 1020 places for temp table mappings
.rept 1024-4
.long 0
.endr
.long user_last_map - KERNEL_BASE + 0x0000 + 0x13
.long user_last_map - KERNEL_BASE + 0x1000 + 0x13
.long kernel_table0 - KERNEL_BASE + 0x0000 + 0x13
.long kernel_table0 - KERNEL_BASE + 0x1000 + 0x13
.globl hwmap_table_0
hwmap_table_0:
.long 0 @ Will be filled with UART base
.rept 1023
.long 0
.endr
.globl kernel_exception_map
kernel_exception_map:
@ First 1008 entries are empty (for use with kernel-side page tables)
.rept 1024-16
.long 0
.endr
.long 0x00000000 + 0x212 @ Exceptions at 0xFFFF0000, re-map first page
.rept 16-1-1
.long 0
.endr
.long 0 @ ALWAYS zero, to catch NULL-1 indexing
.globl kernel_maps_end
kernel_maps_end:
@ vim: ft=armasm
|
VidyaBipin/rust_os
| 1,940
|
Kernel/rundir/arm_bootloader/start.S
|
// "platform-$(PLATFORM).h" is inserted by cpp
#define STACK_TOP (RAM_START+RAM_LENGTH)
#define ENTRY(n) .globl n ; n:
#include "../../../Usermode/rustrt0/armv7-helpers.S"
.extern _binary_______bin_kernel_armv7_bin_start
.section .text
// Padding slots before `start` (placeholder vector space)
b .
b .
b .
b .
b .
// Bootloader entry: measures and loads the embedded kernel ELF,
// relocates the FDT above it, then jumps to the kernel using the
// register handoff convention documented below.
ENTRY(start)
mov sp, #STACK_TOP
mov r0, #UART_BASE
push {r0} @ putb/puts read the UART base back from [STACK_TOP-4]
ldr r8, =_binary_______bin_kernel_armv7_bin_start
@ Get the size of the kernel image
mov r0, r8
bl elf_get_size
mov r12, r0
add r11, r0, #RAM_START @ r11 = first free byte after the loaded kernel
#if defined(FDT_BASE)
mov r10, #0 @ Pre-set FDT base register to 0 (invalid)
ldr r0, =FDT_BASE
ldr r1, [r0]
ldr r2, =0xedfe0dd0 @ FDT magic (little-endian read of big-endian 0xd00dfeed)
teq r1, r2
bne 1f
ldr r1, [r0, #4]
rev r1, r1 @ FDT header fields are big-endian
@ R1 = Size of FDT
mov r10, r11 @ Save FDT base
add r11, r11, r1 @ Increment free RAM base by size of FDT
mov r2, r1 @ Count
mov r1, r0 @ Source
mov r0, r10 @ Destination
bl __aeabi_memcpy4
1:
#else
.extern fdt_start
.extern fdt_len
ldr r2, =fdt_end
ldr r1, =fdt_start
sub r2, r2, r1 @ r2 = embedded FDT size in bytes
mov r10, r11 @ Save FDT base
add r11, r11, r2 @ Increment free RAM base by size of FDT
mov r0, r10 @ Destination
bl __aeabi_memcpy4
#endif
@ Load image
mov r0, r8
mov r1, #RAM_START
bl elf_load_segments
mov r9, r0 @ Save entrypoint
@ Load symbols
mov r0, r8
mov r1, r11
bl elf_load_symbols
add r12, r11, r0 @ r12 = end of used RAM (after symbol data)
mov r8, r9
@ Kernel handoff convention:
@ R8: Kernel entrypoint
@ R9: UART Address
@ R10: FDT base address
@ R11: Symbol information base
@ R12: End of used RAM
@ R13: Magic
ldr r9, =UART_BASE
ldr r13, =0x1badb002 @ Just ninja the multiboot magic value
blx r8
@mov pc, r8
// If the kernel ever returns, report and hang.
ldr r0, =string_boot_fail
mov r1, #(5+7+2) @ length of "Boot failure.\n"
bl puts
b .
// putb(byte): write one byte to the UART saved at [STACK_TOP-4].
ENTRY(putb)
mov r1, #STACK_TOP
ldr r1, [r1, #-4]
strb r0, [r1]
bx lr
@ puts(*const u8, usize)
ENTRY(puts)
mov r2, #STACK_TOP
ldr r2, [r2, #-4] @ r2 = saved UART base
movs r1, r1
beq 2f
1:
ldrb r3, [r0], #1
strb r3, [r2]
subs r1, #1
bne 1b
2:
bx lr
.section .rodata
string_boot_fail:
.string "Boot failure.\n"
// vim: ft=armasm
|
VidyaBipin/rust_os
| 1,815
|
Bootloaders/arm/start.S
|
// "platform-$(PLATFORM).h" is inserted by cpp
#define STACK_TOP (RAM_START+RAM_LENGTH)
#define ENTRY(n) .globl n ; n:
.extern _binary_______Kernel_bin_kernel_armv7_bin_start
.section .text
// Padding slots before `start` (placeholder vector space)
b .
b .
b .
b .
b .
// Bootloader entry: measures and loads the embedded kernel ELF,
// relocates the FDT above it, then jumps to the kernel using the
// register handoff convention documented below.
ENTRY(start)
mov sp, #STACK_TOP
ldr r0, =UART_BASE
push {r0} @ putb/puts read the UART base back from [STACK_TOP-4]
ldr r8, =_binary_______Kernel_bin_kernel_armv7_bin_start
@ Get the size of the kernel image
mov r0, r8
bl elf_get_size
mov r12, r0
add r11, r0, #RAM_START @ r11 = first free byte after the loaded kernel
#if defined(FDT_BASE)
mov r10, #0 @ Pre-set FDT base register to 0 (invalid)
ldr r0, =FDT_BASE
ldr r1, [r0]
ldr r2, =0xedfe0dd0 @ FDT magic (little-endian read of big-endian 0xd00dfeed)
teq r1, r2
bne 1f
ldr r1, [r0, #4]
rev r1, r1 @ FDT header fields are big-endian
@ R1 = Size of FDT
mov r10, r11 @ Save FDT base
add r11, r11, r1 @ Increment free RAM base by size of FDT
mov r2, r1 @ Count
mov r1, r0 @ Source
mov r0, r10 @ Destination
bl __aeabi_memcpy4
1:
#else
.extern fdt_start
.extern fdt_len
ldr r2, =fdt_end
ldr r1, =fdt_start
sub r2, r2, r1 @ r2 = embedded FDT size in bytes
mov r10, r11 @ Save FDT base
add r11, r11, r2 @ Increment free RAM base by size of FDT
mov r0, r10 @ Destination
bl __aeabi_memcpy4
#endif
@ Load image
mov r0, r8
mov r1, #RAM_START
bl elf_load_segments
mov r9, r0 @ Save entrypoint
@ Load symbols
mov r0, r8
mov r1, r11
bl elf_load_symbols
add r12, r11, r0 @ r12 = end of used RAM (after symbol data)
mov r8, r9
@ Kernel handoff convention:
@ R8: Kernel entrypoint
@ R9: UART Address
@ R10: FDT base address
@ R11: Symbol information base
@ R12: End of used RAM
@ R13: Magic
ldr r9, =UART_BASE
ldr r13, =0x1badb002 @ Just ninja the multiboot magic value
mov pc, r8
#include "../../Usermode/rustrt0/armv7-helpers.S"
// putb(byte): write one byte to the UART saved at [STACK_TOP-4].
ENTRY(putb)
mov r1, #STACK_TOP
ldr r1, [r1, #-4]
strb r0, [r1]
bx lr
@ puts(*const u8, usize)
ENTRY(puts)
mov r2, #STACK_TOP
ldr r2, [r2, #-4] @ r2 = saved UART base
movs r1, r1
beq 2f
1:
ldrb r3, [r0], #1
strb r3, [r2]
subs r1, #1
bne 1b
2:
bx lr
// vim: ft=armasm
|
VidyaBipin/rust_os
| 1,782
|
Bootloaders/arm/start-bcm2708.S
|
#define FDT_BASE 0x100 @ Placed here by the GPU
.extern bootloader_link_addr
.extern bootloader_size
@ bcm2708 (Raspberry Pi) bootloader entry. Loaded at 0x8000 by the GPU
@ firmware; the FDT sits at FDT_BASE.
@ Fixes: `mov r6, =sym` is not a valid ARM instruction (must be `ldr`);
@ both copy loops used ldmia/stmia without `!` writeback, so the
@ pointers never advanced; `sub` does not set flags, so the following
@ conditional branch tested stale flags; and `jge` is an x86 mnemonic -
@ ARM's is `bge`.
.section .text
.globl start
start:
	mov sp, #0x8000 @ Stick the stack just before the image
	@ Relocate the bootloader so the kernel can be loaded to the bottom of RAM
	mov r4, #0x8000 @ Source: Load address for bootloader
	ldr r5, =bootloader_link_addr
	ldr r6, =bootloader_size @ was `mov r6, =...` (invalid encoding)
1:
	ldmia r4!, {r0-r3} @ `!` advances the source cursor
	stmia r5!, {r0-r3} @ `!` advances the destination cursor
	subs r6, #4*4 @ `subs` so `bge` below sees fresh flags
	bge 1b
	@ NOTE(review): execution continues in the pre-relocation copy here;
	@ presumably load address == link address for this early code - confirm.
	@ Bootloader relocated, determine kernel size and move FDT to above the kernel.
	ldr r8, =_binary_______Kernel_bin_kernel_armv7_bin_start
	@ Get the size of the kernel image
	mov r0, r8
	bl elf_get_size
	mov r12, r0
	add r12, #0xFFF
	bic r12, #0xFFF
	@ > R12: Page aligned kernel size
	@ - Get the size of the FDT
	mov r1, #0 @ Default FDT size to zero
	ldr r0, =FDT_BASE
	ldr r1, [r0]
	ldr r2, =0xedfe0dd0 @ FDT magic
	teq r1, r2
	bne 1f
	ldr r1, [r0, #4]
	rev r1, r1 @ FDT header fields are big-endian
	@ R1 = Size of FDT
1:
	mov r10, r12 @ Save FDT address
	mov r4, #FDT_BASE
	mov r5, r10
	mov r6, r1
1:
	ldmia r4!, {r0-r3}
	stmia r5!, {r0-r3}
	subs r6, #4*4
	bge 1b
	mov r12, r5 @ Save the post-FDT position in R12
	@ Now that the FDT is out of the way, load the kernel and symbols
	mov r0, r8 @ < Kernel ELF file
	mov r1, #0 @ < Load address
	bl elf_load_segments
	mov r9, r0 @ Save entrypoint
	mov r11, r12 @ Save symbol address
	@ Load symbols
	mov r0, r8 @ < Kernel ELF file
	mov r1, r12 @ < Post-FDT position
	bl elf_load_symbols
	add r12, r12, r0
	mov r8, r9
	@ R8: Kernel entrypoint
	@ R9: UART Address
	@ R10: FDT base address
	@ R11: Symbol information base
	@ R12: End of used RAM
	@ R13: Magic
	mov r8, r9 @ < Entrypoint saved here earlier (redundant but harmless)
	ldr r9, =UART_BASE
	ldr r13, =0x1badb002 @ Just ninja the multiboot magic value
	mov pc, r8
@ vim: ft=armasm
|
VidyaBipin/rust_os
| 2,324
|
Bootloaders/aarch64/start.S
|
// "platform-$(PLATFORM).h" is inserted by cpp
#define STACK_TOP (RAM_START+RAM_LENGTH)
#define ENTRY(n) .globl n ; n:
#include "../../Usermode/rustrt0/armv8-helpers.S"
.extern _binary_kernel_bin_start
.section .text
// AArch64 bootloader entry: loads the embedded kernel ELF, copies the
// FDT above it, and jumps to the kernel.
// NOTE(review): the handoff comments below say R8-R13, but the code
// actually uses x19 (entry), x9 (UART), x10 (FDT), x11 (symbols),
// x12 (RAM end), x13 (magic).
ENTRY(start)
mov x0, #STACK_TOP
mov sp, x0
mov x0, #UART_BASE
str x0, [sp, #-8]! @ putb/puts read the UART base from [STACK_TOP-8]
sub sp, sp, #8 // Align the stack
ldr x0, =string_init
mov x1, #(string_init_END-string_init)
bl puts
// Get the size of the kernel image
ldr x0, =_binary_kernel_bin_start
bl elf_get_size
mov x12, x0 // X12 = ELF Size
mov x1, #RAM_START
add x21, x0, x1 // X21 = End of loaded image
#if defined(FDT_BASE)
mov x20, #0 // Pre-set FDT base register to 0 (invalid)
ldr x0, =FDT_BASE
// Compare `*(FDT_BASE as *const u32) == 0xedfe0dd0`
ldr w1, [x0]
ldr w2, =0xedfe0dd0
cmp w1, w2
bne 1f
// X1 = Size of FDT
mov x1, #0
ldr w1, [x0, #4]
rev w1, w1 // FDT header fields are big-endian
// R1 = Size of FDT
mov x20, x21 // Save FDT base
add x21, x21, x1 // Increment free RAM base by size of FDT
mov x2, x1 // Count
mov x1, x0 // Source
mov x0, x20 // Destination
bl __aeabi_memcpy4
1:
#else
.extern fdt_start
.extern fdt_len
ldr x2, =fdt_end
ldr x1, =fdt_start
sub x2, x2, x1 // x2 = embedded FDT size in bytes
mov x20, x21 // Save FDT base
add x21, x21, x2 // Increment free RAM base by size of FDT
mov x0, x20 // Destination
bl __aeabi_memcpy4
#endif
// Load image
ldr x0, =_binary_kernel_bin_start
mov x1, #RAM_START
bl elf_load_segments
mov x19, x0 // Save entrypoint
// Load symbols
ldr x0, =_binary_kernel_bin_start
mov x1, x21
bl elf_load_symbols
add x12, x21, x0 // x12 = end of used RAM (after symbol data)
mov x10, x20
mov x11, x21
// R8: Kernel entrypoint
// R9: UART Address
// R10: FDT base address
// R11: Symbol information base
// R12: End of used RAM
// R13: Magic
ldr x9, =UART_BASE
ldr x13, =0x1badb002 // Just ninja the multiboot magic value
blr x19
// If the kernel ever returns, report and hang.
ldr x0, =string_boot_fail
mov x1, #(string_boot_fail_END-string_boot_fail)
bl puts
b .
// putb(byte): write one byte to the UART saved at [STACK_TOP-8].
ENTRY(putb)
mov x1, #STACK_TOP
ldr x1, [x1, #-8]
strb w0, [x1]
ret
// puts(*const u8, usize)
ENTRY(puts)
mov x2, #STACK_TOP
ldr x2, [x2, #-8] // x2 = saved UART base
tst x1, x1
beq 2f
1:
ldrb w3, [x0], #1
strb w3, [x2]
subs x1,x1, #1
bne 1b
2:
ret
.section .rodata
string_init:
.string "=== BOOTING ===\n"
string_init_END:
string_boot_fail:
.string "Boot failure.\n"
string_boot_fail_END:
// vim: ft=armasm
|
violeteloiv/Solar-OS
| 4,459
|
src/boot.s
|
/* Rust's ASM block does not seem to default to at&t syntax. Force it with the
* following */
.att_syntax
/* Declare constants for the multiboot header. */
.set ALIGN, 1<<0 /* align loaded modules on page boundaries */
.set MEMINFO, 1<<1 /* provide memory map */
.set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */
.set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */
.set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */
/*
Declare a multiboot header that marks the program as a kernel. These are magic
values that are documented in the multiboot standard. The bootloader will
search for this signature in the first 8 KiB of the kernel file, aligned at a
32-bit boundary. The signature is in its own section so the header can be
forced to be within the first 8 KiB of the kernel file.
*/
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM
/*
The multiboot standard does not define the value of the stack pointer register
(esp) and it is up to the kernel to provide a stack. This allocates room for a
small stack by creating a symbol at the bottom of it, then allocating 16384
bytes for it, and finally creating a symbol at the top. The stack grows
downwards on x86. The stack is in its own section so it can be marked nobits,
which means the kernel file is smaller because it does not contain an
uninitialized stack. The stack on x86 must be 16-byte aligned according to the
System V ABI standard and de-facto extensions. The compiler will assume the
stack is properly aligned and failure to align the stack will result in
undefined behavior.
*/
/* Kernel boot stack: 16 KiB, 16-byte aligned.
   Fix: the stack was 16380 bytes plus a 4-byte tail AFTER stack_top,
   which left stack_top at stack_bottom+16380 - an address that is
   12 mod 16, contradicting the ABI alignment assumption described in
   the comment above. stack_top now sits exactly 16 KiB above the
   16-aligned bottom, so it is itself 16-byte aligned. */
.section .bss
.align 16
stack_bottom:
.skip 16384 # 16 KiB
stack_top:
/*
The linker script specifies _start as the entry point to the kernel and
the bootloader will jump to this position once the kernel has been
loaded. It doesn't make sense to return from this function as the
bootloader is gone.
*/
.section .text
.global _start
.type _start, @function
_start:
	/*
	The bootloader has loaded us into 32-bit protected mode on x86:
	interrupts and paging disabled, machine state as defined by the
	Multiboot standard. Set up a stack first; high-level code cannot
	run without one.
	*/
	mov $stack_top, %esp
	/*
	Fix: the System V i386 ABI requires %esp % 16 == 0 at the `call`
	instruction, but stack_top was not guaranteed to be 16-byte
	aligned and the two 4-byte pushes below shift the phase further.
	Align explicitly, then pad by 8 so that after the two pushes the
	call site is exactly 16-byte aligned.
	*/
	and $-16, %esp
	sub $8, %esp
	/*
	kernel_main(multiboot_magic, multiboot_info): Multiboot hands us
	the magic in %eax and the info pointer in %ebx; push in reverse
	(cdecl) order.
	*/
	push %ebx
	push %eax
	call kernel_main
	/*
	Nothing more to do: mask interrupts and halt forever. hlt can
	still wake on NMI or system-management mode, so loop back to it.
	*/
	cli
1:	hlt
	jmp 1b
/*
Set the size of the _start symbol to the current location '.' minus its
start. Useful for debugging and call tracing.
*/
.size _start, . - _start
|
violeteloiv/Solar-OS
| 4,459
|
src/boot.s
|
/* Rust's ASM block does not seem to default to at&t syntax. Force it with the
* following */
.att_syntax
/* Declare constants for the multiboot header. */
.set ALIGN, 1<<0 /* align loaded modules on page boundaries */
.set MEMINFO, 1<<1 /* provide memory map */
.set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */
.set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */
.set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */
/*
Declare a multiboot header that marks the program as a kernel. These are magic
values that are documented in the multiboot standard. The bootloader will
search for this signature in the first 8 KiB of the kernel file, aligned at a
32-bit boundary. The signature is in its own section so the header can be
forced to be within the first 8 KiB of the kernel file.
*/
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM
/*
The multiboot standard does not define the value of the stack pointer register
(esp) and it is up to the kernel to provide a stack. This allocates room for a
small stack by creating a symbol at the bottom of it, then allocating 16384
bytes for it, and finally creating a symbol at the top. The stack grows
downwards on x86. The stack is in its own section so it can be marked nobits,
which means the kernel file is smaller because it does not contain an
uninitialized stack. The stack on x86 must be 16-byte aligned according to the
System V ABI standard and de-facto extensions. The compiler will assume the
stack is properly aligned and failure to align the stack will result in
undefined behavior.
*/
/* Kernel boot stack: 16 KiB, 16-byte aligned.
   Fix: the stack was 16380 bytes plus a 4-byte tail AFTER stack_top,
   which left stack_top at stack_bottom+16380 - an address that is
   12 mod 16, contradicting the ABI alignment assumption described in
   the comment above. stack_top now sits exactly 16 KiB above the
   16-aligned bottom, so it is itself 16-byte aligned. */
.section .bss
.align 16
stack_bottom:
.skip 16384 # 16 KiB
stack_top:
/*
The linker script specifies _start as the entry point to the kernel and
the bootloader will jump to this position once the kernel has been
loaded. It doesn't make sense to return from this function as the
bootloader is gone.
*/
.section .text
.global _start
.type _start, @function
_start:
	/*
	The bootloader has loaded us into 32-bit protected mode on x86:
	interrupts and paging disabled, machine state as defined by the
	Multiboot standard. Set up a stack first; high-level code cannot
	run without one.
	*/
	mov $stack_top, %esp
	/*
	Fix: the System V i386 ABI requires %esp % 16 == 0 at the `call`
	instruction, but stack_top was not guaranteed to be 16-byte
	aligned and the two 4-byte pushes below shift the phase further.
	Align explicitly, then pad by 8 so that after the two pushes the
	call site is exactly 16-byte aligned.
	*/
	and $-16, %esp
	sub $8, %esp
	/*
	kernel_main(multiboot_magic, multiboot_info): Multiboot hands us
	the magic in %eax and the info pointer in %ebx; push in reverse
	(cdecl) order.
	*/
	push %ebx
	push %eax
	call kernel_main
	/*
	Nothing more to do: mask interrupts and halt forever. hlt can
	still wake on NMI or system-management mode, so loop back to it.
	*/
	cli
1:	hlt
	jmp 1b
/*
Set the size of the _start symbol to the current location '.' minus its
start. Useful for debugging and call tracing.
*/
.size _start, . - _start
|
vj4dsc/https-github.com-bytecodealliance-wasmtime
| 4,165
|
crates/fiber/src/unix/s390x.S
|
// A WORD OF CAUTION
//
// This entire file basically needs to be kept in sync with itself. It's not
// really possible to modify just one bit of this file without understanding
// all the other bits. Documentation tries to reference various bits here and
// there but try to make sure to read over everything before tweaking things!
//
// Also at this time this file is heavily based off the x86_64 file, so you'll
// probably want to read that one as well.
.text
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
#define GLOBL(fnname) .globl VERSIONED_SYMBOL(fnname)
#define HIDDEN(fnname) .hidden VERSIONED_SYMBOL(fnname)
#define TYPE(fnname) .type VERSIONED_SYMBOL(fnname),@function
#define FUNCTION(fnname) VERSIONED_SYMBOL(fnname)
#define SIZE(fnname) .size VERSIONED_SYMBOL(fnname),.-VERSIONED_SYMBOL(fnname)
// fn(top_of_stack(%x0): *mut u8)
HIDDEN(wasmtime_fiber_switch)
GLOBL(wasmtime_fiber_switch)
.p2align 2
TYPE(wasmtime_fiber_switch)
FUNCTION(wasmtime_fiber_switch):
// Save all callee-saved registers on the stack since we're assuming
// they're clobbered as a result of the stack switch.
stmg %r6, %r15, 48(%r15)
aghi %r15, -64
std %f8, 0(%r15)
std %f9, 8(%r15)
std %f10, 16(%r15)
std %f11, 24(%r15)
std %f12, 32(%r15)
std %f13, 40(%r15)
std %f14, 48(%r15)
std %f15, 56(%r15)
// Load our previously saved stack pointer to resume to, and save off our
// current stack pointer on where to come back to eventually.
lg %r1, -16(%r2)
stg %r15, -16(%r2)
// Switch to the new stack and restore all our callee-saved registers after
// the switch and return to our new stack.
ld %f8, 0(%r1)
ld %f9, 8(%r1)
ld %f10, 16(%r1)
ld %f11, 24(%r1)
ld %f12, 32(%r1)
ld %f13, 40(%r1)
ld %f14, 48(%r1)
ld %f15, 56(%r1)
lmg %r6, %r15, 112(%r1)
br %r14
SIZE(wasmtime_fiber_switch)
// fn(
//     top_of_stack(%x0): *mut u8,
//     entry_point(%x1): extern fn(*mut u8, *mut u8),
//     entry_arg0(%x2): *mut u8,
// )
// (s390x ELF ABI: the three arguments actually arrive in %r2, %r3, %r4)
//
// Fabricates, on a fresh fiber stack, exactly the frame layout that
// wasmtime_fiber_switch restores, so that the first switch to this fiber
// "returns" into wasmtime_fiber_start with the entry point and argument
// already loaded into callee-saved registers.
HIDDEN(wasmtime_fiber_init)
GLOBL(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_init)
FUNCTION(wasmtime_fiber_init):
larl %r1, FUNCTION(wasmtime_fiber_start)
stg %r1, -48(%r2) // wasmtime_fiber_start - restored into %r14
stg %r2, -112(%r2) // top_of_stack - restored into %r6
stg %r3, -104(%r2) // entry_point - restored into %r7
stg %r4, -96(%r2) // entry_arg0 - restored into %r8
aghi %r2, -160 // 160 bytes register save area
stg %r2, 120(%r2) // bottom of register save area - restored into %r15
// `wasmtime_fiber_switch` has a 64 byte stack.
aghi %r2, -64
// Publish the fabricated stack pointer in the top_of_stack - 16 slot that
// wasmtime_fiber_switch loads from (%r2 is now top_of_stack - 224, so
// 208(%r2) addresses top_of_stack - 16).
stg %r2, 208(%r2)
br %r14
SIZE(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_start)
// First code executed on a new fiber: unpacks the values planted by
// wasmtime_fiber_init (%r6 = top_of_stack, %r7 = entry_point,
// %r8 = entry_arg0) and calls the entry point.  Never returns.
FUNCTION(wasmtime_fiber_start):
.cfi_startproc simple
.cfi_def_cfa_offset 0
// See the x86_64 file for more commentary on what these CFI directives are
// doing. Like over there note that the relative offsets to registers here
// match the frame layout in `wasmtime_fiber_switch`.
.cfi_escape 0x0f, /* DW_CFA_def_cfa_expression */ \
7, /* the byte length of this expression */ \
0x7f, 0x90, 0x1, /* DW_OP_breg15 0x90 */ \
0x06, /* DW_OP_deref */ \
0x23, 0xe0, 0x1 /* DW_OP_plus_uconst 0xe0 */
.cfi_rel_offset 6, -112
.cfi_rel_offset 7, -104
.cfi_rel_offset 8, -96
.cfi_rel_offset 9, -88
.cfi_rel_offset 10, -80
.cfi_rel_offset 11, -72
.cfi_rel_offset 12, -64
.cfi_rel_offset 13, -56
.cfi_rel_offset 14, -48
.cfi_rel_offset 15, -40
// Load our two arguments prepared by `wasmtime_fiber_init`.
lgr %r2, %r8 // entry_arg0
lgr %r3, %r6 // top_of_stack
// ... and then we call the function! Note that this is a function call so
// our frame stays on the stack to backtrace through.
basr %r14, %r7 // entry_point
// .. technically we shouldn't get here, so just trap.
// (0x0000 is not a valid s390x opcode, so falling through raises an
// operation exception if the entry point ever returns.)
.word 0x0000
.cfi_endproc
SIZE(wasmtime_fiber_start)
// Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
vj4dsc/https-github.com-bytecodealliance-wasmtime
| 4,052
|
crates/runtime/src/arch/s390x.S
|
// Currently `global_asm!` isn't stable on s390x, so this is an external
// assembler file built with the `build.rs`.
.machine z13
.text
.hidden host_to_wasm_trampoline
.globl host_to_wasm_trampoline
.type host_to_wasm_trampoline,@function
.p2align 2
// Version-suffix helpers (suffix presumably injected by the build script;
// confirm against build.rs).
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
// LIBCALL_TRAMPOLINE(libcall, libcall_impl)
//
// Emits a trampoline symbol `libcall` that records the caller's frame
// pointer (taken from the back-chain slot 0(%r15)) and return address
// (%r14) into the VMRuntimeLimits reachable from the first argument (%r2),
// then tail-calls the Rust implementation `libcall_impl`.  Recording the
// exit FP/PC lets the runtime produce backtraces across the Wasm->host
// boundary.
#define LIBCALL_TRAMPOLINE(libcall, libcall_impl) \
.hidden VERSIONED_SYMBOL(libcall) ; \
.globl VERSIONED_SYMBOL(libcall) ; \
.type VERSIONED_SYMBOL(libcall),@function ; \
.p2align 2 ; \
VERSIONED_SYMBOL(libcall): ; \
.cfi_startproc ; \
\
/* Load the pointer to `VMRuntimeLimits` in `%r1`. */ \
lg %r1, 8(%r2) ; \
\
/* Store the last Wasm FP into the `last_wasm_exit_fp` in the limits. */ \
lg %r0, 0(%r15) ; \
stg %r0, 24(%r1) ; \
\
/* Store the last Wasm PC into the `last_wasm_exit_pc` in the limits. */ \
stg %r14, 32(%r1) ; \
\
/* Tail call to the actual implementation of this libcall. */ \
jg VERSIONED_SYMBOL(libcall_impl) ; \
\
.cfi_endproc ; \
.size VERSIONED_SYMBOL(libcall),.-VERSIONED_SYMBOL(libcall)
// One trampoline per Wasm->host libcall.  Each `X` tail-calls its Rust
// implementation `impl_X` after recording the exit FP/PC (see
// LIBCALL_TRAMPOLINE above).
LIBCALL_TRAMPOLINE(memory32_grow, impl_memory32_grow)
LIBCALL_TRAMPOLINE(table_grow_func_ref, impl_table_grow_func_ref)
LIBCALL_TRAMPOLINE(table_grow_externref, impl_table_grow_externref)
LIBCALL_TRAMPOLINE(table_fill_func_ref, impl_table_fill_func_ref)
LIBCALL_TRAMPOLINE(table_fill_externref, impl_table_fill_externref)
LIBCALL_TRAMPOLINE(table_copy, impl_table_copy)
LIBCALL_TRAMPOLINE(table_init, impl_table_init)
LIBCALL_TRAMPOLINE(elem_drop, impl_elem_drop)
LIBCALL_TRAMPOLINE(memory_copy, impl_memory_copy)
LIBCALL_TRAMPOLINE(memory_fill, impl_memory_fill)
LIBCALL_TRAMPOLINE(memory_init, impl_memory_init)
LIBCALL_TRAMPOLINE(ref_func, impl_ref_func)
LIBCALL_TRAMPOLINE(data_drop, impl_data_drop)
LIBCALL_TRAMPOLINE(table_get_lazy_init_func_ref, impl_table_get_lazy_init_func_ref)
LIBCALL_TRAMPOLINE(drop_gc_ref, impl_drop_gc_ref)
// BUGFIX: this was LIBCALL_TRAMPOLINE(gc, gc), which made the `gc`
// trampoline tail-call itself -- an unconditional infinite loop.  Every
// other entry pairs `X` with `impl_X`; restore that pattern here.
LIBCALL_TRAMPOLINE(gc, impl_gc)
LIBCALL_TRAMPOLINE(gc_ref_global_get, impl_gc_ref_global_get)
LIBCALL_TRAMPOLINE(gc_ref_global_set, impl_gc_ref_global_set)
LIBCALL_TRAMPOLINE(memory_atomic_notify, impl_memory_atomic_notify)
LIBCALL_TRAMPOLINE(memory_atomic_wait32, impl_memory_atomic_wait32)
LIBCALL_TRAMPOLINE(memory_atomic_wait64, impl_memory_atomic_wait64)
LIBCALL_TRAMPOLINE(out_of_gas, impl_out_of_gas)
LIBCALL_TRAMPOLINE(new_epoch, impl_new_epoch)
LIBCALL_TRAMPOLINE(check_malloc, impl_check_malloc)
LIBCALL_TRAMPOLINE(check_free, impl_check_free)
LIBCALL_TRAMPOLINE(check_load, impl_check_load)
LIBCALL_TRAMPOLINE(check_store, impl_check_store)
LIBCALL_TRAMPOLINE(malloc_start, impl_malloc_start)
LIBCALL_TRAMPOLINE(free_start, impl_free_start)
LIBCALL_TRAMPOLINE(update_stack_pointer, impl_update_stack_pointer)
LIBCALL_TRAMPOLINE(update_mem_size, impl_update_mem_size)
|
vladimirca2000/rust_minix
| 4,242
|
src/arch/exceptions.S
|
.section .text
// Exception vector table for AArch64
// Each entry is 0x80 bytes (128 bytes) apart
// VBAR_EL1 requires 2KB (2^11) alignment; each slot holds at most 32
// instructions, so a single branch to the real handler fits comfortably.
.align 11
.global exception_vector_table
exception_vector_table:
// Current EL with SP0
.align 7
curr_el_sp0_sync:
b default_exception_handler
.align 7
curr_el_sp0_irq:
b default_exception_handler
.align 7
curr_el_sp0_fiq:
b default_exception_handler
.align 7
curr_el_sp0_serror:
b default_exception_handler
// Current EL with SPx
.align 7
curr_el_spx_sync:
b sync_exception_handler
.align 7
curr_el_spx_irq:
b irq_exception_handler
.align 7
curr_el_spx_fiq:
b fiq_exception_handler
.align 7
curr_el_spx_serror:
b serror_exception_handler
// Lower EL using AArch64
.align 7
lower_el_aarch64_sync:
b sync_exception_handler_el0
.align 7
lower_el_aarch64_irq:
b irq_exception_handler_el0
.align 7
lower_el_aarch64_fiq:
b fiq_exception_handler_el0
.align 7
lower_el_aarch64_serror:
b serror_exception_handler_el0
// Lower EL using AArch32
// (AArch32 lower-EL entries are not supported and just park the core)
.align 7
lower_el_aarch32_sync:
b default_exception_handler
.align 7
lower_el_aarch32_irq:
b default_exception_handler
.align 7
lower_el_aarch32_fiq:
b default_exception_handler
.align 7
lower_el_aarch32_serror:
b default_exception_handler
// Exception context save/restore macros
// save_context builds an ExceptionContext on the stack: x0-x30, then
// sp_el0, elr_el1 and spsr_el1.  272 = 17 * 16 keeps sp 16-byte aligned.
// x0/x1 are stored first so x0 is free as scratch for the system registers.
.macro save_context
// Allocate space for ExceptionContext on stack
sub sp, sp, #272 // 31*8 + 8 + 8 + 8 = 272 bytes
// Save general purpose registers x0-x30
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x19, [sp, #144]
stp x20, x21, [sp, #160]
stp x22, x23, [sp, #176]
stp x24, x25, [sp, #192]
stp x26, x27, [sp, #208]
stp x28, x29, [sp, #224]
str x30, [sp, #240]
// Save sp_el0
mrs x0, sp_el0
str x0, [sp, #248]
// Save elr_el1 and spsr_el1 (return address and PSTATE for eret)
mrs x0, elr_el1
str x0, [sp, #256]
mrs x0, spsr_el1
str x0, [sp, #264]
.endm
// Inverse of save_context: the handler may have modified the saved
// context (e.g. elr_el1 for a skipped instruction), so system registers
// are reloaded first using x0 as scratch, then the GPRs with x0/x1 last.
.macro restore_context
// Restore elr_el1 and spsr_el1
ldr x0, [sp, #256]
msr elr_el1, x0
ldr x0, [sp, #264]
msr spsr_el1, x0
// Restore sp_el0
ldr x0, [sp, #248]
msr sp_el0, x0
// Restore general purpose registers
ldp x28, x29, [sp, #224]
ldr x30, [sp, #240]
ldp x26, x27, [sp, #208]
ldp x24, x25, [sp, #192]
ldp x22, x23, [sp, #176]
ldp x20, x21, [sp, #160]
ldp x18, x19, [sp, #144]
ldp x16, x17, [sp, #128]
ldp x14, x15, [sp, #112]
ldp x12, x13, [sp, #96]
ldp x10, x11, [sp, #80]
ldp x8, x9, [sp, #64]
ldp x6, x7, [sp, #48]
ldp x4, x5, [sp, #32]
ldp x2, x3, [sp, #16]
ldp x0, x1, [sp, #0]
// Deallocate stack space
add sp, sp, #272
.endm
// Exception handlers
// Every EL1/EL0 handler shares one shape: spill an ExceptionContext,
// hand a pointer to it to the Rust handler, restore, and eret.  State
// that shape once and stamp out the eight handlers from it.
.macro exception_entry name, rust_handler
\name:
save_context
mov x0, sp // Pass context as parameter
bl \rust_handler
restore_context
eret
.endm
exception_entry sync_exception_handler, sync_exception_el1
exception_entry irq_exception_handler, irq_exception_el1
exception_entry fiq_exception_handler, fiq_exception_el1
exception_entry serror_exception_handler, serror_exception_el1
exception_entry sync_exception_handler_el0, sync_exception_el0
exception_entry irq_exception_handler_el0, irq_exception_el0
exception_entry fiq_exception_handler_el0, fiq_exception_el0
exception_entry serror_exception_handler_el0, serror_exception_el0
default_exception_handler:
// Unhandled exception class: park the core in a low-power wait loop.
wfe
b default_exception_handler
|
vmpl-dev/libvmpl-rs
| 8,660
|
src/start/dune.S
|
/*
 * dune.S - assembly helper routines (e.g. system calls, interrupts, traps)
 */
#define __ASSEMBLY__
#include "vmpl-core.h"
/* Prefer the WRFSBASE instruction over a WRMSR to MSR_FS_BASE. */
#define USE_RDWRGSFS 1
#define MSR_FS_BASE 0xc0000100
/*
 * GDT selectors: kernel text/data, user data/text (user selectors carry
 * RPL 3).  Parenthesized so the OR expands safely inside any operand
 * expression regardless of surrounding operator precedence.
 */
#define GD_KT 0x10
#define GD_KD 0x18
#define GD_UD (0x28 | 0x03)
#define GD_UT (0x30 | 0x03)
/*
 * Trap Frame Format
 * NOTE: this reflects the layout of struct dune_tf
 */
/* arguments */
#define RDI (0)
#define RSI (8)
#define RDX (16)
#define RCX (24)
#define R8 (32)
#define R9 (40)
/* other registers */
#define R10 (48)
#define R11 (56)
#define RBX (64)
#define RBP (72)
#define R12 (80)
#define R13 (88)
#define R14 (96)
#define R15 (104)
#define REG_END (112)
/* syscall num / return code */
#define RAX (112)
/* exception frame */
#define ERR (120)
#define RIP (128)
#define CS (136)
#define RFLAGS (144)
#define RSP (152)
#define SS (160)
#define EF_START (128)
#define TF_END (168)
#define TF_ALIGN (176)
/*
 * Supervisor Private Area Format (offsets relative to %gs)
 */
#define TMP (8)
#define KFS_BASE (16)
#define UFS_BASE (24)
#define IN_USERMODE (32)
/* NOTE(review): 44 breaks the 8-byte stride of the quad-sized fields
 * above (8/16/24/32 -> expected 40) and makes every
 * `movq %gs:TRAP_STACK, ...` a misaligned quad access.  Left unchanged
 * here; confirm against the supervisor-area layout in vmpl-core.h. */
#define TRAP_STACK (44)
.text
/*
 * macro to save destructable register state
 *
 * Spills the caller-saved GPRs into the struct dune_tf at %rsp using the
 * offsets defined above.
 *   save_full=0:   skip %r10/%r11/%rcx (the syscall path stores those
 *                  separately because SYSCALL repurposes %rcx/%r11)
 *   include_rax=0: skip %rax (already saved, or carries a syscall number)
 */
.macro SAVE_REGS save_full=1, include_rax=1
movq %rdi, RDI(%rsp)
movq %rsi, RSI(%rsp)
movq %rdx, RDX(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
.if \save_full
movq %r10, R10(%rsp)
movq %r11, R11(%rsp)
movq %rcx, RCX(%rsp)
.endif
.if \include_rax
movq %rax, RAX(%rsp)
.endif
.endm
/*
 * macro to save the rest of register state
 *
 * useful for operations that violate AMD64 calling conventions
 * by destroying callee restored state
 *
 * Spills the callee-saved GPRs into the trap frame at %rsp.
 */
.macro SAVE_REST
movq %rbx, RBX(%rsp)
movq %rbp, RBP(%rsp)
movq %r12, R12(%rsp)
movq %r13, R13(%rsp)
movq %r14, R14(%rsp)
movq %r15, R15(%rsp)
.endm
/*
 * macro to restore destructable register state
 *
 * Inverse of SAVE_REGS; the rstor_full/include_rax flags mirror its
 * save_full/include_rax.
 */
.macro RESTORE_REGS rstor_full=1, include_rax=1
.if \include_rax
movq RAX(%rsp), %rax
.endif
.if \rstor_full
movq RCX(%rsp), %rcx
movq R11(%rsp), %r11
movq R10(%rsp), %r10
.endif
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDX(%rsp), %rdx
movq RSI(%rsp), %rsi
movq RDI(%rsp), %rdi
.endm
/*
 * macro to restore the rest of register state
 *
 * useful for operations that violate AMD64 calling conventions
 * by destroying callee restored state
 *
 * Inverse of SAVE_REST.
 */
.macro RESTORE_REST
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx
.endm
/*
 * macro to switch to G0 fs.base
 *
 * NOTE: clobbers %rax, %rdx, and %rcx
 *
 * Clears %gs:IN_USERMODE and installs the kernel (G0) FS base, skipping
 * the write when the user and kernel bases are identical.  The WRMSR
 * fallback expects %edx:%eax = base value and %ecx = MSR index.
 */
.macro SET_G0_FS_BASE
movq $0, %gs:IN_USERMODE
movq %gs:KFS_BASE, %rax
movq %gs:UFS_BASE, %rdx
cmp %rax, %rdx
je 1f
#if USE_RDWRGSFS
wrfsbase %rax
#else
movq %rax, %rdx
shrq $32, %rdx
movl $MSR_FS_BASE, %ecx
wrmsr
#endif /* USE_RDWRGSFS */
1:
.endm
/*
 * macro to switch to G3 fs.base
 *
 * NOTE: clobbers %rax, %rdx, and %rcx
 *
 * Mirror image of SET_G0_FS_BASE: flags user mode and installs the user
 * (G3) FS base, skipping the write when the two bases already match.
 */
.macro SET_G3_FS_BASE
movq $1, %gs:IN_USERMODE
movq %gs:UFS_BASE, %rax
movq %gs:KFS_BASE, %rdx
cmp %rax, %rdx
je 1f
#if USE_RDWRGSFS
wrfsbase %rax
#else
movq %rax, %rdx
shrq $32, %rdx
movl $MSR_FS_BASE, %ecx
wrmsr
#endif /* USE_RDWRGSFS */
1:
.endm
.globl __dune_enter
/*
 * __dune_enter(fd=%rdi, conf=%rsi)
 *
 * Saves the caller's flags and full register file, records %rsp in the
 * config, then issues ioctl(fd, IOCTL_DUNE_ENTER, conf); __NR_ioctl = 16.
 * The first instruction executed inside Dune mode is __dune_ret below.
 * A nonzero %rax from the ioctl means entry failed and we unwind via
 * __dune_ret_fail.
 */
__dune_enter:
pushfq
subq $REG_END, %rsp
SAVE_REGS 1, 0
SAVE_REST
movq %rsp, DUNE_CFG_RSP(%rsi)
movq %rsi, %rdx
movq $IOCTL_DUNE_ENTER, %rsi
movq $16, %rax /* __NR_ioctl */
syscall
cmpq $0, %rax
jnz __dune_ret_fail
/* keep fd and conf in callee-saved registers across re-entries */
movq %rdi, %r12
movq %rdx, %r13
__dune_retry:
/* Dune mode was terminated, call on_dune_exit handler function */
movq %r13, %rdi
/* Update RSP to point to the top of the stack as seen from inside Dune
 * mode. */
movq DUNE_CFG_RSP(%rdi), %rsp
/* Go past the red zone mandated by the System V x86-64 ABI. */
subq $128, %rsp
call on_dune_exit
int3 /* sentinel: on_dune_exit should not return */
__dune_reenter:
/* re-issue the enter ioctl with the stashed fd (%r12) / conf (%r13) */
movq %r12, %rdi
movq %r13, %rdx
movq $IOCTL_DUNE_ENTER, %rsi
movq $16, %rax /* __NR_ioctl */
syscall
cmpq $0, %rax
jz __dune_retry
/* re-entry failed: record the status and fall back to the exit handler */
movq %rax, DUNE_CFG_STATUS(%r13)
movq $DUNE_RET_NOENTER, DUNE_CFG_RET(%r13)
jmp __dune_retry
/* __dune_ret is the location of the first instruction executed in Dune mode */
.globl __dune_ret
__dune_ret:
xorq %rax, %rax /* return 0 from __dune_enter */
__dune_ret_fail:
/* unwind the frame built at the top of __dune_enter and return %rax */
RESTORE_REST
RESTORE_REGS 1, 0
addq $REG_END, %rsp
popfq
retq
.globl __dune_go_linux
/*
 * __dune_go_linux(conf=%rdi)
 *
 * Loads the full register state from the dune_config and IRETs to its
 * RIP/RSP using the *current* CS/SS selectors.  RFLAGS is pushed twice:
 * once for the IRET frame and once for the popf below.
 */
__dune_go_linux:
movq DUNE_CFG_RCX(%rdi), %rcx
movq DUNE_CFG_RBX(%rdi), %rbx
movq DUNE_CFG_RDX(%rdi), %rdx
movq DUNE_CFG_RBP(%rdi), %rbp
movq DUNE_CFG_RSI(%rdi), %rsi
movq DUNE_CFG_R8 (%rdi), %r8
movq DUNE_CFG_R9 (%rdi), %r9
movq DUNE_CFG_R10(%rdi), %r10
movq DUNE_CFG_R11(%rdi), %r11
movq DUNE_CFG_R12(%rdi), %r12
movq DUNE_CFG_R13(%rdi), %r13
movq DUNE_CFG_R14(%rdi), %r14
movq DUNE_CFG_R15(%rdi), %r15
/* IRET frame, pushed bottom-up: SS, RSP, RFLAGS, CS, RIP */
mov %ss, %rax
push %rax
pushq DUNE_CFG_RSP(%rdi)
pushq DUNE_CFG_RFLAGS(%rdi)
mov %cs, %rax
push %rax
pushq DUNE_CFG_RIP(%rdi)
/* second RFLAGS copy, consumed by the popf below */
pushq DUNE_CFG_RFLAGS(%rdi)
movq DUNE_CFG_RAX(%rdi), %rax
movq DUNE_CFG_RDI(%rdi), %rdi
/* We are restoring the flags here, so that RFLAGS.TF is set when IRETQ is
 * executed, so that an INT 1 will be raised _before_ executing the
 * instruction at CS:RIP we are jumping to. */
popf
iretq
.globl __dune_go_dune
/*
 * __dune_go_dune(fd=%rdi, conf=%rsi)
 * Stashes fd/conf in the callee-saved registers the re-entry path expects
 * and re-enters Dune mode via __dune_reenter.  Does not return.
 */
__dune_go_dune:
movq %rdi, %r12
movq %rsi, %r13
jmp __dune_reenter
/*
 * System Call ABI
 * ---------------
 *
 * User Parameters:
 * %rsp - stack pointer
 * %rcx - instruction pointer
 * %r11 - eflags
 * %rax - system call number
 *
 * Arguments:
 * %rdi - arg0, %rsi - arg1, %rdx - arg2
 * %r10 - arg3, %r8 - arg4, %r9 - arg5
 *
 * Return code goes in %rax
 *
 * XXX: don't do relative jumps - watch out code is memcpy
 */
.globl __dune_syscall
__dune_syscall:
/* handle system calls from G0 */
/* Supervisor-mode path: preserve the scratch registers, request
 * hypervisor service through the GHCB MSR (0xc0010130) + vmgexit, and
 * resume at the SYSCALL return address in %rcx.
 * NOTE(review): confirm request code 16 against the GHCB MSR protocol
 * specification. */
testq $1, %gs:IN_USERMODE
jnz 1f
pushq %r11
push %rax
push %rcx
push %rdx
mov $0xc0010130, %ecx
mov $16, %eax
xor %edx, %edx
wrmsr
pop %rdx
pop %rcx
pop %rax
/* popfq consumes the %r11 pushed first, restoring the caller's RFLAGS */
popfq
vmgexit
jmp *%rcx
1:
/* first switch to the kernel stack */
movq %rsp, %gs:TMP
movq %gs:TRAP_STACK, %rsp
/* now push the trap frame onto the stack */
subq $TF_END, %rsp
movq %rcx, RIP(%rsp)
movq %r11, RFLAGS(%rsp)
movq %r10, RCX(%rsp) /* fixup to standard 64-bit calling ABI */
SAVE_REGS 0, 1
movq %gs:TMP, %rax
movq %rax, RSP(%rsp)
/* then restore the CPL0 FS base address */
SET_G0_FS_BASE
/* then finally re-enable interrupts and jump to the handler */
sti
movq %rsp, %rdi /* argument 0 */
/* lea dune_syscall_handler@GOT, %rax */
/* call *%rax */
call dune_syscall_handler
/* next restore the CPL3 FS base address */
SET_G3_FS_BASE
/* then pop the trap frame off the stack */
RESTORE_REGS 0, 1
movq RCX(%rsp), %r10
movq RFLAGS(%rsp), %r11
movq RIP(%rsp), %rcx
/* switch to the user stack and return to ring 3 */
movq RSP(%rsp), %rsp
sysretq
.globl __dune_syscall_end
__dune_syscall_end:
nop
.globl dune_pop_trap_frame
/*
 * dune_pop_trap_frame(tf=%rdi)
 * Loads the full register state from the trap frame at %rdi and IRETs
 * through its embedded exception frame.  Does not return.
 */
dune_pop_trap_frame:
movq %rdi, %rsp /* might actually not be a stack!!! */
/* load the full register state */
RESTORE_REGS
RESTORE_REST
/* jump to the frame */
addq $EF_START, %rsp
iretq
.globl dune_jump_to_user
/*
 * dune_jump_to_user(tf=%rdi)
 * Saves the current (G0) register state plus flags on our own stack,
 * records that stack pointer in %gs:TRAP_STACK so dune_ret_from_user can
 * come back here, forces user code/stack selectors into the trap frame,
 * and drops into ring 3 through dune_pop_trap_frame.
 */
dune_jump_to_user:
subq $TF_ALIGN, %rsp
/* save the full register state */
SAVE_REGS
SAVE_REST
pushfq
popq RFLAGS(%rsp)
/* save the stack pointer */
movq %rsp, %gs:TRAP_STACK
/* set the CPL 3 FS.base */
SET_G3_FS_BASE
/* jump into G3 */
movq $GD_UT, CS(%rdi)
movq $GD_UD, SS(%rdi)
jmp dune_pop_trap_frame
.globl dune_ret_from_user
/*
 * dune_ret_from_user(retcode=%rdi)
 * Rebuilds the kernel context saved by dune_jump_to_user: stores the
 * return code into that frame's RAX slot, points its exception frame at
 * dune_ret_from_user_finish with kernel selectors, and IRETs back, so
 * dune_jump_to_user's caller resumes with %rax = retcode.
 */
dune_ret_from_user:
/* restore the G0 stack */
movq %rdi, %rsi
movq %gs:TRAP_STACK, %rdi
/* return code */
movq %rsi, RAX(%rdi)
/* fill in remaining exception frame data */
lea dune_ret_from_user_finish(%rip), %rax
movq %rax, RIP(%rdi)
movq $GD_KT, CS(%rdi)
movq $GD_KD, SS(%rdi)
movq %rdi, RSP(%rdi)
/* return to the caller */
jmp dune_pop_trap_frame
dune_ret_from_user_finish:
/* discard the frame dune_jump_to_user allocated and return to its caller */
addq $TF_ALIGN, %rsp
ret
.globl __dune_intr
.align 16
/*
 * 256 16-byte interrupt stubs.  For vectors where the CPU does not push
 * an error code, a placeholder push keeps the frame layout uniform;
 * vectors 8, 10-14, 17, 29 and 30 get theirs from hardware.
 * NOTE(review): vector 21 (#CP) also pushes an error code on CET-capable
 * CPUs and is not in the exclusion list -- revisit if CET is enabled.
 */
__dune_intr:
i = 0
.rept 256
.align 16
.if i <> 8 && (i <= 9 || i >= 15) && i <> 17 && i <> 29 && i <> 30
pushq %rax /* placeholder for no error code */
.endif
pushq %rax /* save %rax */
mov $i, %rax
jmp __dune_intr_with_num
i = i + 1
.endr
__dune_intr_with_num:
/* save all registers */
/* after this subq the %rax pushed by the stub sits exactly at RAX(%rsp)
 * and the (real or placeholder) error code at ERR(%rsp) */
subq $REG_END, %rsp
SAVE_REGS 1, 0 /* %rax already is pushed */
SAVE_REST
movq %rax, %rdi
/* then restore the CPL0 FS base address */
/* only needed when the interrupt came from ring 3 */
testq $3, CS(%rsp)
jz __dune_intr_handler
SET_G0_FS_BASE
__dune_intr_handler:
/* setup arguments and call the handler */
movq %rsp, %rsi
call dune_trap_handler
/* next restore the CPL3 FS base address */
testq $3, CS(%rsp)
jz __dune_intr_done
SET_G3_FS_BASE
__dune_intr_done:
/* load all registers */
RESTORE_REST
RESTORE_REGS
/* jump to the frame */
addq $EF_START, %rsp
iretq
|
VR4sigma1/spectre2
| 10,572
|
src/asm/keccakf1600_x86-64-win64.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
#
# __KeccakF1600 -- Keccak-f[1600] permutation, 24 rounds, lane-complement
# representation (callers pre-NOT six lanes; see KeccakF1600 below).
#
# In:   %rdi = 25-lane state, biased by +100 so every lane offset fits in a
#              signed 8-bit displacement (-100(%rdi) is lane A[0][0])
#       %rsi = 200-byte scratch lane buffer, same +100 bias
#       %r15 = round-constant pointer ("iotas"); advanced 8 bytes per round
# Out:  state permuted in place; %r15 rewound to its entry value
#
# One round per .Loop iteration; source and destination buffers are swapped
# with xchgq each round, so after the even count of 24 rounds %rdi is the
# real state again.  The loop exits when the low byte of %r15 reaches zero,
# which the 64 zero-pad bytes before `iotas` arrange to happen after
# exactly 24 constants (64 + 24*8 = 256).
.text
.def __KeccakF1600; .scl 3; .type 32; .endef
.p2align 5
__KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.p2align 5
.Loop:
# Theta: accumulate the five column parities into %rax..%rbp
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# D[x] = parity[x-1] ^ rol(parity[x+1], 1)
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# Rho + Pi + Chi, one output row at a time, written to the other buffer
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
# Iota: fold this round's constant into lane (0,0)
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
# Swap source/destination lane buffers for the next round
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# Low byte of %r15 hits 0 exactly after the 24th constant (see the pad
# before `iotas`); then rewind %r15 by 24*8 bytes.
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.globl KeccakF1600
.def KeccakF1600; .scl 2; .type 32; .endef
.p2align 5
# KeccakF1600(A) -- public wrapper, Windows x64 ABI (argument in %rcx).
# Saves %rdi/%rsi in the caller's home space, moves the argument to the
# SysV register the core expects, complements the six lanes the core keeps
# inverted, biases the state pointer by +100, and hands the core a
# 200-byte scratch buffer at 100(%rsp).
KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_KeccakF1600:
movq %rcx,%rdi
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $200,%rsp
.LSEH_body_KeccakF1600:
# enter the lane-complement representation for the permutation
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
# undo the complement before returning the state to the caller
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
# %r11 = entry %rsp (48 bytes of pushes + 200 of locals = 248)
leaq 248(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_KeccakF1600:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_KeccakF1600:
.globl SHA3_absorb
.def SHA3_absorb; .scl 2; .type 32; .endef
.p2align 5
# SHA3_absorb(A=%rcx, inp=%rdx, len=%r8, bsz=%r9) -- Windows x64 ABI,
# shuffled into the SysV registers below.  XORs full rate-sized (bsz)
# blocks of `inp` into the state, permuting after each; returns in %rax
# the number of unabsorbed tail bytes.  inp/len/bsz are parked at
# 200/208/216(%rsp) across the __KeccakF1600 calls.
SHA3_absorb:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_absorb:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $232,%rsp
.LSEH_body_SHA3_absorb:
movq %rsi,%r9
leaq 100(%rsp),%rsi
# lane-complement representation, as in KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
.Loop_absorb:
# while (len >= bsz) absorb one block and permute
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.p2align 5
.Ldone_absorb:
# return leftover byte count and undo the lane complement
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_SHA3_absorb:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_absorb:
.globl SHA3_squeeze
.def SHA3_squeeze; .scl 2; .type 32; .endef
.p2align 5
# SHA3_squeeze(A=%rcx, out=%rdx, len=%r8, bsz=%r9) -- Windows x64 ABI.
# Copies rate-sized chunks of the state to `out`, re-permuting (via the
# public KeccakF1600, which handles the lane complement) whenever the
# rate is exhausted.  %r8 walks the state, %r12 the output, %r13 counts
# remaining bytes, %r14 keeps bsz/8 to reload the per-block counter.
SHA3_squeeze:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_squeeze:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %r12
pushq %r13
pushq %r14
subq $32,%rsp
.LSEH_body_SHA3_squeeze:
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.p2align 5
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx
jnz .Loop_squeeze
# rate exhausted: permute the state and restart the lane cursor
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
# sub-8-byte tail: rep movsb (0xf3,0xa4) from the state to out
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
.Ldone_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.LSEH_epilogue_SHA3_squeeze:
mov 8(%rsp),%rdi
mov 16(%rsp),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_squeeze:
.p2align 8
# 64 bytes of zero padding place `iotas` 64 bytes past a 256-byte
# boundary, so after 24 round constants (192 bytes) the constant
# pointer's low byte is zero -- the loop-exit test in __KeccakF1600.
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# ASCII credit string from the cryptogams generator
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .pdata
.p2align 2
# Windows x64 SEH: three RUNTIME_FUNCTION ranges per exported routine
# (prologue, body, epilogue), each pointing at an unwind-info blob in
# .xdata below.
.rva .LSEH_begin_KeccakF1600
.rva .LSEH_body_KeccakF1600
.rva .LSEH_info_KeccakF1600_prologue
.rva .LSEH_body_KeccakF1600
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_info_KeccakF1600_body
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_end_KeccakF1600
.rva .LSEH_info_KeccakF1600_epilogue
.rva .LSEH_begin_SHA3_absorb
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_prologue
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_body
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_end_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_epilogue
.rva .LSEH_begin_SHA3_squeeze
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_prologue
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_body
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_end_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_epilogue
.section .xdata
.p2align 3
# Unwind-info blobs referenced above (cryptogams' hand-encoded UNWIND_INFO
# byte sequences; do not edit without re-deriving the encodings).
.LSEH_info_KeccakF1600_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_KeccakF1600_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x19,0x00
.byte 0x00,0xe4,0x1a,0x00
.byte 0x00,0xd4,0x1b,0x00
.byte 0x00,0xc4,0x1c,0x00
.byte 0x00,0x54,0x1d,0x00
.byte 0x00,0x34,0x1e,0x00
.byte 0x00,0x74,0x20,0x00
.byte 0x00,0x64,0x21,0x00
.byte 0x00,0x01,0x1f,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_KeccakF1600_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_absorb_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x1d,0x00
.byte 0x00,0xe4,0x1e,0x00
.byte 0x00,0xd4,0x1f,0x00
.byte 0x00,0xc4,0x20,0x00
.byte 0x00,0x54,0x21,0x00
.byte 0x00,0x34,0x22,0x00
.byte 0x00,0x74,0x24,0x00
.byte 0x00,0x64,0x25,0x00
.byte 0x00,0x01,0x23,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_squeeze_body:
.byte 1,0,11,0
.byte 0x00,0xe4,0x04,0x00
.byte 0x00,0xd4,0x05,0x00
.byte 0x00,0xc4,0x06,0x00
.byte 0x00,0x74,0x08,0x00
.byte 0x00,0x64,0x09,0x00
.byte 0x00,0x62
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_epilogue:
.byte 1,0,4,0
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0x00,0x00,0x00
|
VR4sigma1/spectre2
| 8,238
|
src/asm/keccakf1600_x86-64-osx.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
#
# macOS build of the same Keccak-f[1600] core as the Win64 file: 24 rounds,
# lane-complement representation, DWARF CFI instead of SEH, Mach-O local
# labels (L$...).
#
# In:   %rdi = 25-lane state biased by +100 (lane offsets fit in 8 bits)
#       %rsi = 200-byte scratch lane buffer, same bias
#       %r15 = round-constant pointer; loop exits when its low byte hits 0
#              (the 64 zero-pad bytes before `iotas` arrange 64+24*8 = 256)
# Out:  state permuted in place; %r15 rewound to its entry value
.text
.p2align 5
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
# Theta: accumulate the five column parities into %rax..%rbp
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# D[x] = parity[x-1] ^ rol(parity[x+1], 1)
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# Rho + Pi + Chi, one output row at a time, written to the other buffer
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
# Iota: fold this round's constant into lane (0,0)
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
# Swap source/destination lane buffers for the next round
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# Low byte of %r15 hits 0 after the 24th constant; rewind 24*8 bytes.
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
# _KeccakF1600(A) -- public wrapper, SysV AMD64 ABI (argument in %rdi).
# Complements the six lanes the core keeps inverted, biases the state
# pointer by +100, and hands the core a 200-byte scratch at 100(%rsp).
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
# enter the lane-complement representation for the permutation
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
# undo the complement before returning the state to the caller
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
# %r11 = entry %rsp (48 bytes of pushes + 200 of locals = 248)
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.globl _SHA3_absorb
.p2align 5
# _SHA3_absorb(A=%rdi, inp=%rsi, len=%rdx, bsz=%rcx) -- SysV AMD64 ABI.
# XORs full rate-sized (bsz) blocks of `inp` into the state, permuting
# after each; returns in %rax the number of unabsorbed tail bytes.
# inp/len/bsz are parked at 200/208/216(%rsp) across the permutation.
_SHA3_absorb:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9
leaq 100(%rsp),%rsi
# lane-complement representation, as in _KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
L$oop_absorb:
# while (len >= bsz) absorb one block and permute
cmpq %rcx,%rdx
jc L$done_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
L$block_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz L$block_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp L$oop_absorb
.p2align 5
L$done_absorb:
# return leftover byte count and undo the lane complement
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
# void SHA3_squeeze(uint64_t A[25], unsigned char *out, size_t len, size_t bsz)
# rdi = state, rsi = output, rdx = length, rcx = block size.
# Copies state lanes to the output, re-permuting the state each time a
# full block has been emitted; a sub-8-byte tail is copied with rep movsb.
.globl _SHA3_squeeze
.p2align 5
_SHA3_squeeze:
.cfi_startproc
# endbr64 (CET/IBT landing pad)
.byte 0xf3,0x0f,0x1e,0xfa
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
# scratch + keeps rsp 16-aligned at the KeccakF1600 call
subq $32,%rsp
.cfi_adjust_cfa_offset 32
# r8 = read cursor, r12 = output, r13 = bytes left, r14 = bsz in lanes
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp L$oop_squeeze
.p2align 5
L$oop_squeeze:
cmpq $8,%r13
jb L$tail_squeeze
# copy one full lane to the output
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz L$done_squeeze
# block exhausted? permute and restart from lane 0
subq $1,%rcx
jnz L$oop_squeeze
# NOTE(review): rcx is reloaded from r14 right after the call, so this
# mov looks redundant -- kept as emitted by the upstream generator
movq %rdi,%rcx
call _KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp L$oop_squeeze
L$tail_squeeze:
# copy the final 1..7 bytes with rep movsb
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
L$done_squeeze:
# restore r12-r14 saved above (rsp+32/40/48) and pop the whole frame
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
# rep ret
.byte 0xf3,0xc3
.cfi_endproc
# Round-constant table.  The 256-byte alignment plus the 64-byte pad
# place iotas so that after 24 rounds (24*8 = 192 bytes) the pointer
# lands on a 256-byte boundary, which the permutation core detects with
# `testq $255,%r15` to terminate its loop.
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
# 24 Keccak-f[1600] iota round constants
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# ASCII ident: "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
VR4sigma1/spectre2
| 8,619
|
src/asm/keccakf1600_x86-64-elf.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
# Internal Keccak-f[1600] permutation core (ELF variant; private ABI).
# In:  rdi = source state A[25], pointer biased by +100 bytes
#      rsi = destination/scratch state, also biased by +100 bytes
#      r15 = cursor into the iotas round-constant table
# Lanes 1,2,8,12,17,20 are kept complemented by the callers so chi needs
# fewer NOTs.  One round per .Loop iteration; rdi/rsi are exchanged every
# round (xchgq below), so after the even 24 rounds rdi is back to the
# original state.  r15 advances 8 bytes per round; iotas sits 64 bytes
# past a 256-byte boundary, so after 24 rounds (192 bytes) the low byte
# of r15 becomes 0 and `testq $255,%r15` falls through.
# Clobbers rax,rbx,rcx,rdx,rbp,r8-r14 and flags.
.type __KeccakF1600,@function
.align 32
__KeccakF1600:
.cfi_startproc
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
# preload the bottom row (lanes 20..24) as theta accumulators
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
# ---- theta: fold all 25 lanes into five column parities C[0..4] ----
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# ---- theta: D[x] = C[x-1] ^ rol(C[x+1],1), left in rax..rbp ----
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# ---- rho + pi + chi, row by row, writing the new state into rsi ----
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
# iota: xor the round constant into lane 0 and advance the cursor
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# ---- last row; swap source/destination states for the next round ----
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# 24 rounds done when r15 reaches the next 256-byte boundary
testq $255,%r15
jnz .Loop
# rewind the round-constant cursor for the next invocation
leaq -192(%r15),%r15
# rep ret
.byte 0xf3,0xc3
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
# void KeccakF1600(uint64_t A[25])  -- public ELF wrapper around the core.
# Saves all callee-saved registers, allocates a 200-byte scratch state,
# complements lanes 1,2,8,12,17,20 (the representation the core expects),
# runs the 24-round permutation, then un-complements and returns.
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
KeccakF1600:
.cfi_startproc
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# bias the state pointer; allocate the 200-byte scratch state
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
# un-bias the state pointer for the caller
leaq -100(%rdi),%rdi
# r11 = rsp + 200 + 6*8: just past the saved registers
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
# rep ret
.byte 0xf3,0xc3
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
# size_t SHA3_absorb(uint64_t A[25], const unsigned char *inp,
#                    size_t len, size_t bsz)            -- ELF entry point
# rdi = state, rsi = input, rdx = length, rcx = block size.
# Absorbs whole bsz-byte blocks, permuting after each; returns the
# leftover byte count (< bsz) in rax.
.globl SHA3_absorb
.type SHA3_absorb,@function
.align 32
SHA3_absorb:
.cfi_startproc
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# biased state pointer; 200-byte scratch state + 3 spill slots
leaq 100(%rdi),%rdi
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9
leaq 100(%rsp),%rsi
# pre-complement lanes 1,2,8,12,17,20 for the permutation core
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
# spill bsz
movq %rcx,216-100(%rsi)
.Loop_absorb:
# stop when remaining length < block size
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
# xor one input lane into the state
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
# spill input pointer / remaining length around the permutation
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.align 32
.Ldone_absorb:
# return leftover byte count; un-complement the state
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
# r11 = rsp + 232 + 6*8
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
# rep ret
.byte 0xf3,0xc3
.cfi_endproc
.size SHA3_absorb,.-SHA3_absorb
# void SHA3_squeeze(uint64_t A[25], unsigned char *out, size_t len, size_t bsz)
# rdi = state, rsi = output, rdx = length, rcx = block size.
# Emits state lanes to the output, re-permuting after every full block;
# a sub-8-byte tail is copied with rep movsb.
.globl SHA3_squeeze
.type SHA3_squeeze,@function
.align 32
SHA3_squeeze:
.cfi_startproc
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
# scratch + keeps rsp 16-aligned at the KeccakF1600 call
subq $32,%rsp
.cfi_adjust_cfa_offset 32
# r8 = read cursor, r12 = output, r13 = bytes left, r14 = bsz in lanes
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.align 32
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
# copy one full lane
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
# block exhausted? permute and restart from lane 0
subq $1,%rcx
jnz .Loop_squeeze
# NOTE(review): rcx is reloaded from r14 right after the call; this mov
# looks redundant -- kept as emitted by the upstream generator
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
# final 1..7 bytes via rep movsb
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
.Ldone_squeeze:
# restore r12-r14 (saved at rsp+48/40/32) and drop the frame
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
# rep ret
.byte 0xf3,0xc3
.cfi_endproc
.size SHA3_squeeze,.-SHA3_squeeze
# Round-constant table.  256-byte alignment plus the 64-byte pad place
# iotas so that after 24 rounds (192 bytes) the cursor hits a 256-byte
# boundary, which __KeccakF1600 detects with `testq $255,%r15`.
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
# 24 Keccak-f[1600] iota round constants
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
# ASCII ident: "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
# GNU property note advertising CET compatibility (matches the endbr64
# prologues above).  Fields: namesz=4, descsz=2f-1f, type=5
# (presumably NT_GNU_PROPERTY_TYPE_0; value 3 = IBT|SHSTK -- verify
# against the gABI x86 property definitions).
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
VR4sigma1/spectre2
| 10,572
|
src/asm/keccakf1600_x86-64-mingw64.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
# Internal Keccak-f[1600] permutation core (MinGW64 build; private ABI,
# same register contract as the ELF variant):
#   rdi = source state biased +100, rsi = destination state biased +100,
#   r15 = iotas cursor.  Lanes 1,2,8,12,17,20 are kept complemented by
# the callers.  One round per .Loop iteration; rdi/rsi swap each round,
# and `testq $255,%r15` ends the loop after 24 rounds (see iotas below).
.def __KeccakF1600; .scl 3; .type 32; .endef
.p2align 5
__KeccakF1600:
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
# preload the bottom row (lanes 20..24) as theta accumulators
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.p2align 5
.Loop:
# ---- theta: column parities ----
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# ---- theta: D[x] = C[x-1] ^ rol(C[x+1],1) ----
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# ---- rho + pi + chi, row by row, into the rsi state ----
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
# iota: xor round constant, advance cursor
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# ---- last row; swap source/destination for the next round ----
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# 24 rounds done when r15 reaches a 256-byte boundary
testq $255,%r15
jnz .Loop
# rewind the round-constant cursor
leaq -192(%r15),%r15
# rep ret
.byte 0xf3,0xc3
# void KeccakF1600(uint64_t A[25])  -- Win64 entry point.
# Win64 passes the argument in rcx; the code stashes the caller's rdi/rsi
# in the 32-byte home space (8/16(%rsp)) and translates to the SysV-style
# register layout (rdi = state) used by the shared core.  The .LSEH_*
# labels delimit prologue/body/epilogue for the unwind tables below.
.globl KeccakF1600
.def KeccakF1600; .scl 2; .type 32; .endef
.p2align 5
KeccakF1600:
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
# save rdi/rsi (callee-saved on Win64) into the caller's home space
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_KeccakF1600:
# translate Win64 arg register to the internal convention
movq %rcx,%rdi
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# biased state pointer; 200-byte scratch state
leaq 100(%rdi),%rdi
subq $200,%rsp
.LSEH_body_KeccakF1600:
# complement lanes 1,2,8,12,17,20 for the core, permute, un-complement
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
# r11 = rsp + 200 + 6*8: just past the saved registers
leaq 248(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_KeccakF1600:
# reload the caller's rdi/rsi from the home space
mov 8(%r11),%rdi
mov 16(%r11),%rsi
# rep ret
.byte 0xf3,0xc3
.LSEH_end_KeccakF1600:
# size_t SHA3_absorb(uint64_t A[25], const unsigned char *inp,
#                    size_t len, size_t bsz)          -- Win64 entry point.
# Win64 args rcx,rdx,r8,r9 are shuffled into the internal SysV layout
# (rdi,rsi,rdx,rcx); rdi/rsi are preserved via the caller's home space.
# Returns the leftover byte count (< bsz) in rax.
.globl SHA3_absorb
.def SHA3_absorb; .scl 2; .type 32; .endef
.p2align 5
SHA3_absorb:
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_absorb:
# Win64 -> internal register shuffle
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# biased state pointer; 200-byte scratch + 3 spill slots
leaq 100(%rdi),%rdi
subq $232,%rsp
.LSEH_body_SHA3_absorb:
movq %rsi,%r9
leaq 100(%rsp),%rsi
# complement lanes 1,2,8,12,17,20 for the permutation core
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
.Loop_absorb:
# stop when remaining length < block size
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
# xor one input lane into the state
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
# spill input pointer / remaining length around the permutation
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.p2align 5
.Ldone_absorb:
# return leftover byte count; un-complement the state
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
# r11 = rsp + 232 + 6*8
leaq 280(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_SHA3_absorb:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
# rep ret
.byte 0xf3,0xc3
.LSEH_end_SHA3_absorb:
# void SHA3_squeeze(uint64_t A[25], unsigned char *out, size_t len, size_t bsz)
# Win64 entry point; args rcx,rdx,r8,r9 shuffled to rdi,rsi,rdx,rcx.
# Emits state lanes, re-permuting after each full block; tail copied
# with rep movsb.
.globl SHA3_squeeze
.def SHA3_squeeze; .scl 2; .type 32; .endef
.p2align 5
SHA3_squeeze:
# endbr64
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_squeeze:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %r12
pushq %r13
pushq %r14
subq $32,%rsp
.LSEH_body_SHA3_squeeze:
# r8 = read cursor, r12 = output, r13 = bytes left, r14 = bsz in lanes
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.p2align 5
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
# copy one full lane
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
# block exhausted? permute and restart from lane 0
subq $1,%rcx
jnz .Loop_squeeze
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
# final 1..7 bytes via rep movsb
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
.Ldone_squeeze:
# restore r12-r14 and drop the frame
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.LSEH_epilogue_SHA3_squeeze:
# reload the caller's rdi/rsi from the home space
mov 8(%rsp),%rdi
mov 16(%rsp),%rsi
# rep ret
.byte 0xf3,0xc3
.LSEH_end_SHA3_squeeze:
# Round-constant table.  256-byte alignment plus the 64-byte pad place
# iotas so that after 24 rounds (192 bytes) the cursor hits a 256-byte
# boundary, which __KeccakF1600 detects with `testq $255,%r15`.
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
# 24 Keccak-f[1600] iota round constants
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# ASCII ident: "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
# Win64 SEH function table: three RUNTIME_FUNCTION-style (begin, end,
# unwind-info) RVA triples per exported function, one each for the
# prologue, body and epilogue ranges delimited by the .LSEH_* labels.
.section .pdata
.p2align 2
.rva .LSEH_begin_KeccakF1600
.rva .LSEH_body_KeccakF1600
.rva .LSEH_info_KeccakF1600_prologue
.rva .LSEH_body_KeccakF1600
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_info_KeccakF1600_body
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_end_KeccakF1600
.rva .LSEH_info_KeccakF1600_epilogue
.rva .LSEH_begin_SHA3_absorb
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_prologue
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_body
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_end_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_epilogue
.rva .LSEH_begin_SHA3_squeeze
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_prologue
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_body
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_end_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_epilogue
# Win64 SEH unwind-info (UNWIND_INFO) records referenced by the .pdata
# entries above.  Each record encodes the prologue's register saves and
# stack allocation as hand-assembled unwind-code bytes -- presumably
# matching the push/sub sequences in the corresponding functions; verify
# against the Microsoft x64 unwind-data format if these are ever edited.
.section .xdata
.p2align 3
.LSEH_info_KeccakF1600_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_KeccakF1600_body:
# saves of r15,r14,r13,r12,rbp,rbx plus rdi/rsi home-space slots
.byte 1,0,18,0
.byte 0x00,0xf4,0x19,0x00
.byte 0x00,0xe4,0x1a,0x00
.byte 0x00,0xd4,0x1b,0x00
.byte 0x00,0xc4,0x1c,0x00
.byte 0x00,0x54,0x1d,0x00
.byte 0x00,0x34,0x1e,0x00
.byte 0x00,0x74,0x20,0x00
.byte 0x00,0x64,0x21,0x00
.byte 0x00,0x01,0x1f,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_KeccakF1600_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_absorb_body:
# same registers as KeccakF1600 but with the larger 232-byte frame
.byte 1,0,18,0
.byte 0x00,0xf4,0x1d,0x00
.byte 0x00,0xe4,0x1e,0x00
.byte 0x00,0xd4,0x1f,0x00
.byte 0x00,0xc4,0x20,0x00
.byte 0x00,0x54,0x21,0x00
.byte 0x00,0x34,0x22,0x00
.byte 0x00,0x74,0x24,0x00
.byte 0x00,0x64,0x25,0x00
.byte 0x00,0x01,0x23,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_squeeze_body:
# saves of r14,r13,r12 plus rdi/rsi home-space slots, 32-byte frame
.byte 1,0,11,0
.byte 0x00,0xe4,0x04,0x00
.byte 0x00,0xd4,0x05,0x00
.byte 0x00,0xc4,0x06,0x00
.byte 0x00,0x74,0x08,0x00
.byte 0x00,0x64,0x09,0x00
.byte 0x00,0x62
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_epilogue:
.byte 1,0,4,0
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0x00,0x00,0x00
|
w1s3one805/Googvm
| 1,406
|
kernel_loader/src/test_elf.S
|
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build instructions:
# x86_64-linux-gnu-as test_elf.S -o test_elf.o
# x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld
.intel_syntax noprefix
# Minimal freestanding test kernel: prints "Hello world!\n" to the COM1
# serial port by polling the UART line-status register (bit 5 = transmit
# holding register empty), then executes int3 so the host VMM sees a
# vcpu exit.  No stack is used.
.section .rodata
hello_world:
    .string "Hello world!\n"
.set hello_size, .-hello_world
.text
.globl _start
_start:
    lea rsi, [rip + hello_world]  # rsi -> message string
    mov rcx, hello_size           # rcx = length of message
    mov dx, 0x3F8                 # dx = COM1 port
.print_loop:
    # Wait for the transmit buffer to be empty by polling the line status.
    add dx, 5                     # dx = line status register (COM1+5)
.wait_empty:
    in al, dx                     # read line status
    test al, 0x20                 # check buffer empty flag (THR empty)
    jz .wait_empty                # keep waiting if flag is not set
.wait_done:
    sub dx, 5                     # dx = data register
    # Load a byte of the message and send it to the serial port.
    lodsb                         # load message byte from RSI to AL
    out dx, al                    # send byte to serial port
    dec rcx                       # rcx--
    jnz .print_loop               # repeat if rcx != 0
.done:
    int3                          # cause vcpu to exit
|
wangyanyu1817/Starry_homework
| 1,598
|
modules/axhal/linker.lds.S
|
/* Kernel linker script template: %ARCH% / %KERNEL_BASE% / %SMP% are
 * placeholders substituted by the build system before linking. */
OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
    . = BASE_ADDRESS;
    _skernel = .;
    /* code: boot stub first so _start sits at the image base */
    .text : ALIGN(4K) {
        _stext = .;
        *(.text.boot)
        *(.text .text.*)
        . = ALIGN(4K);
        _etext = .;
    }
    /* read-only data */
    .rodata : ALIGN(4K) {
        _srodata = .;
        *(.rodata .rodata.*)
        *(.srodata .srodata.*)
        *(.sdata2 .sdata2.*)
        . = ALIGN(4K);
        _erodata = .;
    }
    /* initialized data; the boot page table goes first, page-aligned */
    .data : ALIGN(4K) {
        _sdata = .;
        *(.data.boot_page_table)
        . = ALIGN(4K);
        *(.data .data.*)
        *(.sdata .sdata.*)
        *(.got .got.*)
    }
    /* thread-local initialization image and zero-filled TLS */
    .tdata : ALIGN(0x10) {
        _stdata = .;
        *(.tdata .tdata.*)
        _etdata = .;
    }
    .tbss : ALIGN(0x10) {
        _stbss = .;
        *(.tbss .tbss.*)
        *(.tcommon)
        _etbss = .;
    }
    . = ALIGN(4K);
    /* per-CPU area: one 64-byte-aligned copy of the load image per CPU,
     * linked at VMA 0 but loaded at _percpu_start */
    _percpu_start = .;
    .percpu 0x0 : AT(_percpu_start) {
        _percpu_load_start = .;
        *(.percpu .percpu.*)
        _percpu_load_end = .;
        . = ALIGN(64);
        _percpu_size_aligned = .;
        . = _percpu_load_start + _percpu_size_aligned * %SMP%;
    }
    . = _percpu_start + SIZEOF(.percpu);
    _percpu_end = .;
    . = ALIGN(4K);
    _edata = .;
    /* zero-initialized data; the boot stack is placed at its start so
     * boot_stack / boot_stack_top bracket it */
    .bss : ALIGN(4K) {
        boot_stack = .;
        *(.bss.stack)
        . = ALIGN(4K);
        boot_stack_top = .;
        _sbss = .;
        *(.bss .bss.*)
        *(.sbss .sbss.*)
        *(COMMON)
        . = ALIGN(4K);
        _ebss = .;
    }
    _ekernel = .;
    /DISCARD/ : {
        *(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
    }
}
|
wangyanyu1817/Starry_homework
| 4,307
|
modules/axhal/src/platform/x86_pc/multiboot.S
|
# Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
# Multiboot boot path: enters in 32-bit protected mode, sets up a
# temporary GDT and identity/high-half page tables, then switches to
# long mode and jumps to the Rust entry points.  The `{...}` fields are
# template placeholders filled in by the build (presumably Rust
# global_asm!/format substitution -- confirm against the including crate).
.section .text.boot
.code32
.global _start
_start:
    mov edi, eax        # arg1: magic: 0x2BADB002
    mov esi, ebx        # arg2: multiboot info
    jmp bsp_entry32
# Multiboot v1 header; must be 4-byte aligned within the first 8 KiB
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    _skernel - {offset}                 # load_addr
    .int    _edata - {offset}                   # load_end
    .int    _ebss - {offset}                    # bss_end_addr
    .int    _start - {offset}                   # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
    # set data segment selectors (0x18 = data segment in the temp GDT)
    mov ax, 0x18
    mov ss, ax
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
    # set PAE, PGE bit in CR4
    mov eax, {cr4}
    mov cr4, eax
    # load the temporary page table
    lea eax, [.Ltmp_pml4 - {offset}]
    mov cr3, eax
    # set LME, NXE bit in IA32_EFER
    mov ecx, {efer_msr}
    mov edx, 0
    mov eax, {efer}
    wrmsr
    # set protected mode, write protect, paging bit in CR0
    mov eax, {cr0}
    mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
    # clear segment selectors (null selectors are valid in long mode)
    xor ax, ax
    mov ss, ax
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
.endm
.code32
bsp_entry32:
    lgdt [.Ltmp_gdt_desc - {offset}]            # load the temporary GDT
    ENTRY32_COMMON
    ljmp 0x10, offset bsp_entry64 - {offset}    # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
    ENTRY32_COMMON
    ljmp 0x10, offset ap_entry64 - {offset}     # 0x10 is code64 segment
.code64
bsp_entry64:
    ENTRY64_COMMON
    # set RSP to boot stack
    movabs rsp, offset {boot_stack}
    add rsp, {boot_stack_size}
    # call rust_entry(magic, mbi)
    movabs rax, offset {entry}
    call rax
    jmp .Lhlt
.code64
ap_entry64:
    ENTRY64_COMMON
    # set RSP to high address (already set in ap_start.S)
    mov rax, {offset}
    add rsp, rax
    # call rust_entry_secondary(magic)
    mov rdi, {mb_magic}
    movabs rax, offset {entry_secondary}
    call rax
    jmp .Lhlt
# parking loop for any entry that returns
.Lhlt:
    hlt
    jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short .Ltmp_gdt_end - .Ltmp_gdt - 1        # limit
    .long .Ltmp_gdt - {offset}                  # base
.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
# Temporary paging: first 4 GiB identity-mapped and also aliased at the
# high half via 1 GiB huge pages.
.balign 4096
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3       # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 510
    # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3      # PRESENT | WRITABLE | paddr(tmp_pdpt)
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
wangyanyu1817/Starry_homework
| 1,965
|
modules/axhal/src/platform/x86_pc/ap_start.S
|
# Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
# AP trampoline, copied to {start_page_paddr} (a low 4 KiB page).  The
# pa_* constants translate link-time label addresses to the physical
# addresses the copied code actually runs at; the BSP stores the AP's
# stack pointer and 32-bit entry point at the page's last 16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
    # real-mode entry: mask interrupts, flush caches, zero segments
    cli
    wbinvd
    xor ax, ax
    mov ds, ax
    mov es, ax
    mov ss, ax
    mov fs, ax
    mov gs, ax
    # load the 64-bit GDT
    lgdt [pa_ap_gdt_desc]
    # switch to protected-mode (set CR0.PE)
    mov eax, cr0
    or eax, (1 << 0)
    mov cr0, eax
    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
    # pick up the stack and entry point published by the BSP
    mov esp, [stack_ptr]
    mov eax, [entry_ptr]
    jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1  # limit
    .long pa_ap_gdt                             # base
.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
wangyanyu1817/Starry_homework
| 2,325
|
modules/axhal/src/arch/riscv/trap.S
|
// SAVE_REGS: build a TrapFrame on the kernel stack.  Saves the general
// registers, sepc, sstatus and the pre-trap sp (taken from sscratch,
// which the vector swapped in).  The raw .short opcodes are compressed
// fsd/fld instructions, emitted as bytes so the file assembles even
// without an F/D target attribute.
// For user traps (from_user == 1) it additionally swaps the user gp/tp
// (saved into tf.regs slots 2/3) with the supervisor gp/tp that were
// previously stashed there.
.macro SAVE_REGS, from_user
    addi    sp, sp, -{trapframe_size}
    PUSH_GENERAL_REGS
    csrr    t0, sepc
    csrr    t1, sstatus
    csrrw   t2, sscratch, zero          // save sscratch (sp) and zero it
    STR     t0, sp, 31                  // tf.sepc
    STR     t1, sp, 32                  // tf.sstatus
    STR     t2, sp, 1                   // tf.regs.sp
    .short  0xa622                      // fsd fs0,264(sp)
    .short  0xaa26                      // fsd fs1,272(sp)
    .if \from_user == 1
    LDR     t1, sp, 2                   // load supervisor gp stashed in tf (slot doubles as CPU-ID gp)
    LDR     t0, sp, 3                   // load supervisor tp
    STR     gp, sp, 2                   // save user gp and tp in their place
    STR     tp, sp, 3
    mv      gp, t1                      // switch to supervisor gp/tp
    mv      tp, t0
    .endif
.endm
// RESTORE_REGS: inverse of SAVE_REGS.  For user returns it also arms
// sscratch with the supervisor sp (sp + frame size) for the next trap.
.macro RESTORE_REGS, from_user
    .if \from_user == 1
    LDR     t1, sp, 2                   // reload user gp/tp from the frame
    LDR     t0, sp, 3
    STR     gp, sp, 2                   // stash supervisor gp and tp back
    STR     tp, sp, 3                   // into the frame for the next trap
    mv      gp, t1
    mv      tp, t0
    addi    t0, sp, {trapframe_size}    // put supervisor sp to scratch
    csrw    sscratch, t0
    .endif
    LDR     t0, sp, 31                  // tf.sepc
    LDR     t1, sp, 32                  // tf.sstatus
    csrw    sepc, t0
    csrw    sstatus, t1
    .short  0x2432                      // fld fs0,264(sp)
    .short  0x24d2                      // fld fs1,272(sp)
    POP_GENERAL_REGS
    LDR     sp, sp, 1                   // load sp from tf.regs.sp
.endm
// Trap vector (stvec target, 4-byte aligned, direct mode).  The
// sscratch convention distinguishes the origin privilege level:
//   sscratch == 0: trap from S mode (sp already the kernel stack)
//   sscratch != 0: trap from U mode (sscratch holds the kernel sp)
// Both paths build a TrapFrame and call riscv_trap_handler(tf, from_user).
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
    // sscratch == 0: trap from S mode
    // sscratch != 0: trap from U mode
    csrrw   sp, sscratch, sp            // switch sscratch and sp
    bnez    sp, .Ltrap_entry_u
    csrr    sp, sscratch                // put supervisor sp back
    j       .Ltrap_entry_s
.Ltrap_entry_s:
    SAVE_REGS 0
    mv      a0, sp                      // a0 = &TrapFrame
    li      a1, 0                       // a1 = from_user = false
    call    riscv_trap_handler
    RESTORE_REGS 0
    sret
.Ltrap_entry_u:
    SAVE_REGS 1
    mv      a0, sp                      // a0 = &TrapFrame
    li      a1, 1                       // a1 = from_user = true
    call    riscv_trap_handler
    RESTORE_REGS 1
    sret
// __copy(src: *const TrapFrame, dst: *mut T): copy 33 doublewords
// (a full TrapFrame) from a0 to a1, fully unrolled via .rept.
// Clobbers t2 only.
.altmacro
.macro COPY n
    ld      t2, (\n)*8(a0)
    sd      t2, (\n)*8(a1)
.endm
.section .text
.globl __copy
__copy:
    # __copy(
    #   frame_address: *const TrapFrame,
    #   kernel_base: *mut T
    # )
    .set    n, 0
    .rept   33
        COPY %n
        .set n, n + 1
    .endr
    ret
|
wangyanyu1817/Starry_homework
| 1,505
|
modules/axhal/src/arch/x86_64/trap.S
|
# x86-64 interrupt/exception entry stubs: 256 tiny handlers that push a
# uniform (vector, error_code) pair and fall into a common path which
# builds a TrapFrame and calls x86_trap_handler(&tf).  Vectors 8, 10-14
# and 17 are the exceptions for which the CPU itself pushes an error
# code; all others push a dummy 0 so the frame layout is identical.
.equ NUM_INT, 256
.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm
.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm
.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr
.Ltrap_common:
    # [rsp+24] is the saved CS; low 2 bits = CPL of the interrupted code
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:
    # push remaining GPRs to complete the TrapFrame
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax
    mov     rdi, rsp                    # rdi = &TrapFrame
    call    x86_trap_handler
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:
    add     rsp, 16                     # pop vector, error_code
    iretq
# IDT-construction helper table: one entry-stub address per vector
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
|
wangyanyu1817/Starry_homework
| 4,435
|
modules/axhal/src/arch/aarch64/trap.S
|
.macro clear_gp_regs
.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
mov x\n, xzr
.endr
.endm
.macro SAVE_REGS, el
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x10, elr_el1
mrs x11, spsr_el1
stp x10, x11, [sp, 32 * 8]
.if \el == 0
clear_gp_regs
mrs x12, tpidr_el0 // save user tls pointer
ldr x13, [sp, 31 * 8] // restore current ktask ptr
mrs x9, sp_el0 // save user stack pointer */
msr sp_el0, x13 // restore kernel task ptr
.else
mov x9, sp
mov x12, xzr
.endif
str x12, [sp, 34 * 8] // save tpidr_el0
str x9, [sp, 31 * 8] // save user sp
.endm
.macro RESTORE_REGS, el
ldp x30, x9, [sp, 30 * 8] // load user sp_el0
ldp x10, x11, [sp, 32 * 8] // load ELR, SPSR
msr elr_el1, x10
msr spsr_el1, x11
ldr x12, [sp, 34 * 8]
.if \el == 0
msr tpidr_el0, x12 // restore user tls pointer
mrs x13, sp_el0 // save current ktask ptr
str x13, [sp, 31 * 8]
msr sp_el0, x9 // restore user sp
.endif
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, 35 * 8
.endm
.macro HANDLE_TRAP, el, ht, regsize, label
.p2align 7
b handle_el\el\ht\()_\regsize\()_\label
.endm
.macro HANDLE, el, ht, regsize, label
.section .text
handle_el\el\ht\()_\regsize\()_\label:
sub sp, sp, 35 * 8
SAVE_REGS \el
mov x0, sp
bl handle_el\el\ht\()_\regsize\()_\label\()_exception
.if \el == 1
b ret_to_kernel
.else
b ret_to_user
.endif
.endm
.section .text
// AArch64 exception vector table: 16 entries of 128 bytes, 2 KB aligned
// (VBAR_EL1 alignment requirement). Entry order is architectural:
// {sync, irq, fiq, serror} x {cur-EL/SP0, cur-EL/SPx, lower-EL/A64,
// lower-EL/A32}.
.p2align 11
.global exception_vector_base
exception_vector_base:
// current EL, with SP_EL0
HANDLE_TRAP 1, t, 64, sync
HANDLE_TRAP 1, t, 64, irq
HANDLE_TRAP 1, t, 64, fiq
HANDLE_TRAP 1, t, 64, error
// current EL, with SP_ELx
HANDLE_TRAP 1, h, 64, sync
HANDLE_TRAP 1, h, 64, irq
HANDLE_TRAP 1, h, 64, fiq
HANDLE_TRAP 1, h, 64, error
// lower EL, aarch64 with SP_EL0
HANDLE_TRAP 0, t, 64, sync
HANDLE_TRAP 0, t, 64, irq
HANDLE_TRAP 0, t, 64, fiq
HANDLE_TRAP 0, t, 64, error
// lower EL, aarch32
HANDLE_TRAP 0, t, 32, sync
HANDLE_TRAP 0, t, 32, irq
HANDLE_TRAP 0, t, 32, fiq
HANDLE_TRAP 0, t, 32, error
/*
* used to create handle_el_label_trap
*/
// Out-of-line handler stubs targeted by the vector entries above; each
// expects a C function handle_el<el><ht>_<regsize>_<label>_exception.
// current EL, with SP_EL0
HANDLE 1, t, 64, sync
HANDLE 1, t, 64, irq
HANDLE 1, t, 64, fiq
HANDLE 1, t, 64, error
// current EL, with SP_ELx
HANDLE 1, h, 64, sync
HANDLE 1, h, 64, irq
HANDLE 1, h, 64, fiq
HANDLE 1, h, 64, error
// lower EL, aarch64 with SP_EL0
HANDLE 0, t, 64, sync
HANDLE 0, t, 64, irq
HANDLE 0, t, 64, fiq
HANDLE 0, t, 64, error
// lower EL, aarch32
HANDLE 0, t, 32, sync
HANDLE 0, t, 32, irq
HANDLE 0, t, 32, fiq
HANDLE 0, t, 32, error
.section .text
// Return to EL1 after a kernel-mode trap.
.global ret_to_kernel
ret_to_kernel:
RESTORE_REGS 1
eret
.section .text
// Return to EL0; also re-parks the ktask pointer in frame slot 31
// (see RESTORE_REGS).
.global ret_to_user
ret_to_user:
RESTORE_REGS 0
eret
.section .text
// First entry into user space: x0 = address of a fully-initialized trap
// frame; adopt it as sp and take the normal user-return path.
.global ret_to_first_user
ret_to_first_user:
mov sp, x0
b ret_to_user
.altmacro
// COPY n -- move 8-byte slot n from [x0] to [x1]; clobbers x3.
.macro COPY n
ldr x3, [x0, (\n)*8]
str x3, [x1, (\n)*8]
.endm
.section .text
.globl __copy
__copy:
# __copy(
# frame_address: *const TrapFrame,
# kernel_base: *mut T
# )
// Copies all 35 8-byte trap-frame slots from x0 to x1.
// Clobbers x3 only; x0 and x1 are preserved.
.set n, 0
.rept 35
COPY %n
.set n, n + 1
.endr
ret
|
Wankupi/kernel
| 3,572
|
src/arch/trap.S
|
/*
 * Trampoline trap-frame constants.
 * TRAP_FRAME_ADDR: fixed virtual address of the trap frame, two pages
 * below the top of the address space (the expression relies on
 * two's-complement wraparound).
 * OFFSET_REGS: byte offset of the x1..x31/sepc save area inside the
 * frame; the 0x28-byte header holds kernel satp/sp/handler/hartid and
 * the user satp (see the loads in _trap_entry and _user_ret).
 */
#define TRAP_FRAME_ADDR (0x0 - 0x1000 - 0x1000)
#define OFFSET_REGS 0x28
.extern kernel_trap_entry
.section .text._trap_entry_early
.global _trap_entry_early
.align 2
/*
 * Early trap vector: dispatch straight into the kernel handler.
 * NOTE(review): the fall-through `j _trap_entry` only executes if
 * kernel_trap_entry returns -- confirm that is the intended flow.
 */
_trap_entry_early:
call kernel_trap_entry
j _trap_entry
.section .text.trampoline
.global _trap_entry
.align 2
/*
 * User -> kernel trap path. Runs out of the trampoline mapping (placed in
 * .text.trampoline), so it survives the satp switch below. On entry every
 * register still holds its user value.
 */
_trap_entry:
csrw sscratch, a0 # stash user a0 so a0 can address the frame
li a0, TRAP_FRAME_ADDR
add a0, a0, OFFSET_REGS # a0 -> register save area
sd x1, 0(a0)
sd x2, 8(a0)
sd x3, 16(a0)
sd x4, 24(a0)
sd x5, 32(a0)
sd x6, 40(a0)
sd x7, 48(a0)
sd x8, 56(a0)
sd x9, 64(a0)
csrr t0, sscratch # t0 (x5) was saved above -- safe as scratch now
sd t0, 72(a0) # user a0 (x10), recovered from sscratch
sd x11, 80(a0)
sd x12, 88(a0)
sd x13, 96(a0)
sd x14, 104(a0)
sd x15, 112(a0)
sd x16, 120(a0)
sd x17, 128(a0)
sd x18, 136(a0)
sd x19, 144(a0)
sd x20, 152(a0)
sd x21, 160(a0)
sd x22, 168(a0)
sd x23, 176(a0)
sd x24, 184(a0)
sd x25, 192(a0)
sd x26, 200(a0)
sd x27, 208(a0)
sd x28, 216(a0)
sd x29, 224(a0)
sd x30, 232(a0)
sd x31, 240(a0)
csrr t0, sepc
sd t0, 248(a0) # user pc
addi a0, a0, -OFFSET_REGS # back to the frame header
ld a1, 0(a0) # kernel satp
ld sp, 8(a0) # kernel sp
ld ra, 16(a0) # kernel trap function
ld tp, 24(a0) # hart id
sfence.vma zero, zero
csrw satp, a1 # switch to the kernel address space
sfence.vma zero, zero
jalr ra # jump to kernel trap function
.global _user_ret
.align 2
/*
 * _user_ret -- final return to user mode via the trampoline frame.
 * NOTE(review): the previous header said "a0: user satp", but a0 is
 * overwritten immediately below and the user satp is loaded from the
 * frame header at offset 0x20 -- the incoming argument appears unused;
 * confirm against callers.
 */
_user_ret:
li a0, TRAP_FRAME_ADDR
# satp
ld a1, 0x20(a0) # user satp from the frame header
add a0, a0, OFFSET_REGS
sfence.vma zero, zero
csrw satp, a1 # switch back to the user address space
sfence.vma zero, zero
# pc
ld t0, 248(a0)
csrw sepc, t0
# regs -- a0 itself is restored last because it is still the frame
# pointer; x5 (t0) and x11 (a1) are re-loaded below after their use as
# temporaries above.
ld x1, 0(a0)
ld x2, 8(a0)
ld x3, 16(a0)
ld x4, 24(a0)
ld x5, 32(a0)
ld x6, 40(a0)
ld x7, 48(a0)
ld x8, 56(a0)
ld x9, 64(a0)
ld x11, 80(a0)
ld x12, 88(a0)
ld x13, 96(a0)
ld x14, 104(a0)
ld x15, 112(a0)
ld x16, 120(a0)
ld x17, 128(a0)
ld x18, 136(a0)
ld x19, 144(a0)
ld x20, 152(a0)
ld x21, 160(a0)
ld x22, 168(a0)
ld x23, 176(a0)
ld x24, 184(a0)
ld x25, 192(a0)
ld x26, 200(a0)
ld x27, 208(a0)
ld x28, 216(a0)
ld x29, 224(a0)
ld x30, 232(a0)
ld x31, 240(a0)
ld a0, 72(a0)
sret
.global _copy_u_s
.align 2
/*
 * _copy_u_s -- byte-wise copy executed with satp switched to the user
 * address space; a fault during the copy vectors (via stvec) to .Lerr and
 * is reported as -1 instead of panicking.
 */
_copy_u_s:
# a0: src, a1: dst, a2: length, a3: user_satp
# return a0: 0 if success, -1 if fail
# backup
# a4: kernel satp
# a5: kernel stvec
# a6: kernel sie
# a7: kernel sstatus
csrr a4, satp
csrr a5, stvec
csrr a6, sie
csrr a7, sstatus
# set csr regs
la t0, .Lerr # any trap during the copy lands on .Lerr (direct mode)
csrw stvec, t0
la t0, .Lstart
csrw sepc, t0 # NOTE(review): sepc is never consumed by an sret here --
              # this write looks dead; confirm
li t0, 2
csrw sie, t0 # sie = 0x2: only SSIE enabled during the copy
li t0, 1
slli t0, t0, 18
csrs sstatus, t0 # set sstatus.SUM so S-mode may touch user pages
csrw satp, a3 # NOTE(review): no sfence.vma around this satp write,
              # unlike the restore in .Lret below -- confirm intended
.Lstart:
# start copy
beqz a2, .Lend
lb t1, 0(a0)
sb t1, 0(a1)
addi a0, a0, 1
addi a1, a1, 1
addi a2, a2, -1
j .Lstart
.Lend:
li a0, 0
j .Lret
.Lerr:
li a0, -1
j .Lret
.Lret:
# restore
csrw satp, a4
csrw stvec, a5
csrw sie, a6
csrw sstatus, a7
ret
|
WashCout/PROJETO_INTEGRADOR_III_UNIVESP
| 15,124
|
ambientefrutos/Lib/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/atomic_support.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd1__TBB_full_fence#
.global __TBB_machine_fetchadd1__TBB_full_fence#
__TBB_machine_fetchadd1__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd1acquire
}
.endp __TBB_machine_fetchadd1__TBB_full_fence#
.proc __TBB_machine_fetchadd1acquire#
.global __TBB_machine_fetchadd1acquire#
__TBB_machine_fetchadd1acquire:
ld1 r9=[r32]
;;
Retry_1acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1__TBB_full_fence#
.global __TBB_machine_fetchstore1__TBB_full_fence#
__TBB_machine_fetchstore1__TBB_full_fence:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1__TBB_full_fence#
.proc __TBB_machine_fetchstore1acquire#
.global __TBB_machine_fetchstore1acquire#
__TBB_machine_fetchstore1acquire:
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp1__TBB_full_fence#
.global __TBB_machine_cmpswp1__TBB_full_fence#
__TBB_machine_cmpswp1__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp1acquire
}
.endp __TBB_machine_cmpswp1__TBB_full_fence#
.proc __TBB_machine_cmpswp1acquire#
.global __TBB_machine_cmpswp1acquire#
__TBB_machine_cmpswp1acquire:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd2__TBB_full_fence#
.global __TBB_machine_fetchadd2__TBB_full_fence#
__TBB_machine_fetchadd2__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd2acquire
}
.endp __TBB_machine_fetchadd2__TBB_full_fence#
.proc __TBB_machine_fetchadd2acquire#
.global __TBB_machine_fetchadd2acquire#
__TBB_machine_fetchadd2acquire:
ld2 r9=[r32]
;;
Retry_2acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2__TBB_full_fence#
.global __TBB_machine_fetchstore2__TBB_full_fence#
__TBB_machine_fetchstore2__TBB_full_fence:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2__TBB_full_fence#
.proc __TBB_machine_fetchstore2acquire#
.global __TBB_machine_fetchstore2acquire#
__TBB_machine_fetchstore2acquire:
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp2__TBB_full_fence#
.global __TBB_machine_cmpswp2__TBB_full_fence#
__TBB_machine_cmpswp2__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp2acquire
}
.endp __TBB_machine_cmpswp2__TBB_full_fence#
.proc __TBB_machine_cmpswp2acquire#
.global __TBB_machine_cmpswp2acquire#
__TBB_machine_cmpswp2acquire:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd4__TBB_full_fence#
.global __TBB_machine_fetchadd4__TBB_full_fence#
__TBB_machine_fetchadd4__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd4acquire
}
.endp __TBB_machine_fetchadd4__TBB_full_fence#
.proc __TBB_machine_fetchadd4acquire#
.global __TBB_machine_fetchadd4acquire#
__TBB_machine_fetchadd4acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4acquire
(p8) br.cond.dpnt Dec_4acquire
;;
ld4 r9=[r32]
;;
Retry_4acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4acquire
br.ret.sptk.many b0
Inc_4acquire:
fetchadd4.acq r8=[r32],1
br.ret.sptk.many b0
Dec_4acquire:
fetchadd4.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4__TBB_full_fence#
.global __TBB_machine_fetchstore4__TBB_full_fence#
__TBB_machine_fetchstore4__TBB_full_fence:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4__TBB_full_fence#
.proc __TBB_machine_fetchstore4acquire#
.global __TBB_machine_fetchstore4acquire#
__TBB_machine_fetchstore4acquire:
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp4__TBB_full_fence#
.global __TBB_machine_cmpswp4__TBB_full_fence#
__TBB_machine_cmpswp4__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp4acquire
}
.endp __TBB_machine_cmpswp4__TBB_full_fence#
.proc __TBB_machine_cmpswp4acquire#
.global __TBB_machine_cmpswp4acquire#
__TBB_machine_cmpswp4acquire:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd8__TBB_full_fence#
.global __TBB_machine_fetchadd8__TBB_full_fence#
__TBB_machine_fetchadd8__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd8acquire
}
.endp __TBB_machine_fetchadd8__TBB_full_fence#
.proc __TBB_machine_fetchadd8acquire#
.global __TBB_machine_fetchadd8acquire#
__TBB_machine_fetchadd8acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8acquire
(p8) br.cond.dpnt Dec_8acquire
;;
ld8 r9=[r32]
;;
Retry_8acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8acquire
br.ret.sptk.many b0
Inc_8acquire:
fetchadd8.acq r8=[r32],1
br.ret.sptk.many b0
Dec_8acquire:
fetchadd8.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8__TBB_full_fence#
.global __TBB_machine_fetchstore8__TBB_full_fence#
__TBB_machine_fetchstore8__TBB_full_fence:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8__TBB_full_fence#
.proc __TBB_machine_fetchstore8acquire#
.global __TBB_machine_fetchstore8acquire#
__TBB_machine_fetchstore8acquire:
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp8__TBB_full_fence#
.global __TBB_machine_cmpswp8__TBB_full_fence#
__TBB_machine_cmpswp8__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp8acquire
}
.endp __TBB_machine_cmpswp8__TBB_full_fence#
.proc __TBB_machine_cmpswp8acquire#
.global __TBB_machine_cmpswp8acquire#
__TBB_machine_cmpswp8acquire:
mov ar.ccv=r34
;;
cmpxchg8.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd1release#
.global __TBB_machine_fetchadd1release#
__TBB_machine_fetchadd1release:
ld1 r9=[r32]
;;
Retry_1release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1release#
.global __TBB_machine_fetchstore1release#
__TBB_machine_fetchstore1release:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp1release#
.global __TBB_machine_cmpswp1release#
__TBB_machine_cmpswp1release:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd2release#
.global __TBB_machine_fetchadd2release#
__TBB_machine_fetchadd2release:
ld2 r9=[r32]
;;
Retry_2release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2release#
.global __TBB_machine_fetchstore2release#
__TBB_machine_fetchstore2release:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp2release#
.global __TBB_machine_cmpswp2release#
__TBB_machine_cmpswp2release:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd4release#
.global __TBB_machine_fetchadd4release#
__TBB_machine_fetchadd4release:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4release
(p8) br.cond.dpnt Dec_4release
;;
ld4 r9=[r32]
;;
Retry_4release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4release
br.ret.sptk.many b0
Inc_4release:
fetchadd4.rel r8=[r32],1
br.ret.sptk.many b0
Dec_4release:
fetchadd4.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4release#
.global __TBB_machine_fetchstore4release#
__TBB_machine_fetchstore4release:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp4release#
.global __TBB_machine_cmpswp4release#
__TBB_machine_cmpswp4release:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd8release#
.global __TBB_machine_fetchadd8release#
__TBB_machine_fetchadd8release:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8release
(p8) br.cond.dpnt Dec_8release
;;
ld8 r9=[r32]
;;
Retry_8release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8release
br.ret.sptk.many b0
Inc_8release:
fetchadd8.rel r8=[r32],1
br.ret.sptk.many b0
Dec_8release:
fetchadd8.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8release#
.global __TBB_machine_fetchstore8release#
__TBB_machine_fetchstore8release:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp8release#
.global __TBB_machine_cmpswp8release#
__TBB_machine_cmpswp8release:
mov ar.ccv=r34
;;
cmpxchg8.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8release#
|
WashCout/PROJETO_INTEGRADOR_III_UNIVESP
| 1,304
|
ambientefrutos/Lib/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/log2.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
.section .text
.align 16
// unsigned long __TBB_machine_lg( unsigned long x );
// r32 = x
// Computes floor(log2(x)) for x != 0: the shr/or cascade smears the most
// significant set bit into every lower bit position (cumulative shifts
// 1,2,3,5,8,13,21,34,55 cover all 64 bits), then popcnt counts the bits
// at and below the MSB, and subtracting 1 yields the MSB's index.
// Result undefined for x == 0.
.proc __TBB_machine_lg#
.global __TBB_machine_lg#
__TBB_machine_lg:
shr r16=r32,1 // .x
;;
shr r17=r32,2 // ..x
or r32=r32,r16 // xx
;;
shr r16=r32,3 // ...xx
or r32=r32,r17 // xxx
;;
shr r17=r32,5 // .....xxx
or r32=r32,r16 // xxxxx
;;
shr r16=r32,8 // ........xxxxx
or r32=r32,r17 // xxxxxxxx
;;
shr r17=r32,13
or r32=r32,r16 // 13x
;;
shr r16=r32,21
or r32=r32,r17 // 21x
;;
shr r17=r32,34
or r32=r32,r16 // 34x
;;
shr r16=r32,55
or r32=r32,r17 // 55x
;;
or r32=r32,r16 // 64x
;;
popcnt r8=r32 // set-bit count = MSB index + 1
;;
add r8=-1,r8
br.ret.sptk.many b0
.endp __TBB_machine_lg#
|
WashCout/PROJETO_INTEGRADOR_III_UNIVESP
| 1,270
|
ambientefrutos/Lib/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/lock_byte.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Support for class TinyLock
.section .text
.align 16
// unsigned int __TBB_machine_trylockbyte( byte& flag );
// r32 = address of flag
// Returns 1 if the lock byte was atomically changed 0 -> 1, else 0.
.proc __TBB_machine_trylockbyte#
.global __TBB_machine_trylockbyte#
// Register aliases for readability:
ADDRESS_OF_FLAG=r32
RETCODE=r8
FLAG=r9
BUSY=r10
SCRATCH=r11
__TBB_machine_trylockbyte:
ld1.acq FLAG=[ADDRESS_OF_FLAG] // early read: cheap fail if already held
mov BUSY=1
mov RETCODE=0
;;
cmp.ne p6,p0=0,FLAG
mov ar.ccv=r0 // cmpxchg comparand: expect 0 (unlocked)
(p6) br.ret.sptk.many b0 // already locked -> return 0
;;
cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock
;;
cmp.eq p6,p0=0,SCRATCH
;;
(p6) mov RETCODE=1 // we observed 0 -> we now own the lock
br.ret.sptk.many b0
.endp __TBB_machine_trylockbyte#
|
WashCout/PROJETO_INTEGRADOR_III_UNIVESP
| 2,687
|
ambientefrutos/Lib/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/ia64_misc.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// RSE backing store pointer retrieval
.section .text
.align 16
// __TBB_get_bsp(): return the RSE backing-store pointer (ar.bsp) in r8.
.proc __TBB_get_bsp#
.global __TBB_get_bsp#
__TBB_get_bsp:
mov r8=ar.bsp
br.ret.sptk.many b0
.endp __TBB_get_bsp#
.section .text
.align 16
// 8-byte load with no ordering semantics (r32 = address, result in r8).
.proc __TBB_machine_load8_relaxed#
.global __TBB_machine_load8_relaxed#
__TBB_machine_load8_relaxed:
ld8 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load8_relaxed#
.section .text
.align 16
// 8-byte store with no ordering semantics (r32 = address, r33 = value).
.proc __TBB_machine_store8_relaxed#
.global __TBB_machine_store8_relaxed#
__TBB_machine_store8_relaxed:
st8 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store8_relaxed#
.section .text
.align 16
// 4-byte relaxed load.
.proc __TBB_machine_load4_relaxed#
.global __TBB_machine_load4_relaxed#
__TBB_machine_load4_relaxed:
ld4 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load4_relaxed#
.section .text
.align 16
// 4-byte relaxed store.
.proc __TBB_machine_store4_relaxed#
.global __TBB_machine_store4_relaxed#
__TBB_machine_store4_relaxed:
st4 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store4_relaxed#
.section .text
.align 16
// 2-byte relaxed load.
.proc __TBB_machine_load2_relaxed#
.global __TBB_machine_load2_relaxed#
__TBB_machine_load2_relaxed:
ld2 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load2_relaxed#
.section .text
.align 16
// 2-byte relaxed store.
.proc __TBB_machine_store2_relaxed#
.global __TBB_machine_store2_relaxed#
__TBB_machine_store2_relaxed:
st2 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store2_relaxed#
.section .text
.align 16
// 1-byte relaxed load.
.proc __TBB_machine_load1_relaxed#
.global __TBB_machine_load1_relaxed#
__TBB_machine_load1_relaxed:
ld1 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load1_relaxed#
.section .text
.align 16
// 1-byte relaxed store.
.proc __TBB_machine_store1_relaxed#
.global __TBB_machine_store1_relaxed#
__TBB_machine_store1_relaxed:
st1 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store1_relaxed#
|
watertogas/risc_rust_os
| 2,695
|
os/src/trap/trap.S
|
.altmacro
#STORE_REGISTER n: save integer register x<n> into slot n of the frame at sp
.macro STORE_REGISTER n
sd x\n, \n*8(sp)
.endm
#LOAD_REGISTER n: restore integer register x<n> from slot n of the frame at sp
.macro LOAD_REGISTER n
ld x\n, \n*8(sp)
.endm
.section .text.trap
.global _user_trap_entry
.global _user_trap_return
.global _kernel_trap_entry
.global _kernel_trap_return
.align 2
#Trapcontext slot layout (8-byte slots): 1=x1, 2=user sp, 3..31=x3..x31,
#32=sepc, 33=sstatus, 34=kernel sp, 35=kernel satp, 36=user satp,
#37=handler address, 38=*Trapcontext (kernel-space address)
_user_trap_entry:
#exchange stack, sscratch->user_stack, sp -> *Trapcontext in userspace
csrrw sp, sscratch, sp
#store x1(double words)
sd x1, 1*8(sp)
#skip x2, store x3~x31
.set n, 3
.rept 29
STORE_REGISTER %n
.set n, n+1
.endr
#now save sepc & sstatus
#(x5/x6 were saved by the loop above, so they are free as temporaries)
csrr x5, sepc
csrr x6, sstatus
sd x5, 32*8(sp)
sd x6, 33*8(sp)
#save user_stack
csrr x5, sscratch
sd x5, 2*8(sp)
#read *Trapcontext in kernelspace first
ld a4, 38*8(sp)
#read kernel stack
ld a5, 34*8(sp)
#read handler_addr
ld a7, 37*8(sp)
# now read kernel stap and switch to kernel space
ld a6, 35*8(sp)
csrw satp, a6
sfence.vma
#move to kernel stack
mv sp, a5
#set a0 for handler
mv a0, a4
jr a7
#_user_trap_return(a0: *Trapcontext in kernel space): switch back to the
#user address space and restore the full user register state, then sret.
#
#Fix: the original restored x3~x31 *before* using x5/x6 (t0/t1) as
#temporaries for sepc/sstatus/user-stack, so the user's t0 and t1 reached
#sret holding CSR scratch values instead of their saved contents. CSRs are
#now restored first and the general registers last (the same ordering
#_kernel_trap_return already uses).
_user_trap_return:
#read context addr in userspace, a0 is *Trapcontext in kernelspace
ld x6, 38*8(a0)
#store context addr in kernel space to Trapcontext
sd a0, 38*8(a0)
#read user stap
ld x5, 36*8(a0)
#switch to userspace
csrw satp, x5
sfence.vma
#move sp -> *Trapcontext in userspace, then we can restore registers
mv sp, x6
#recover sepc & sstatus while x5/x6 are still free as temporaries
ld x5, 32*8(sp)
ld x6, 33*8(sp)
csrw sepc, x5
csrw sstatus, x6
#stage user_stack in sscratch
ld x5, 2*8(sp)
csrw sscratch, x5
#recover x1(double words)
ld x1, 1*8(sp)
#skip x2, recover x3~x31 (this also leaves x5/x6 holding user values)
.set n, 3
.rept 29
LOAD_REGISTER %n
.set n, n+1
.endr
#switch to user stack, sscratch -> *Trapcontext
csrrw sp, sscratch, sp
#return from scall
sret
.align 2
#we did not know when the irq signal come, so all registers
#should be restored
#Kernel-mode trap: build a 34-slot frame on the current kernel stack and
#call the handler whose address is kept in sscratch while in S-mode.
_kernel_trap_entry:
addi sp, sp, -34*8
#store common registers, x0(zero), x2(sp), x4(thread pointer) are ignored
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
STORE_REGISTER %n
.set n, n+1
.endr
#now save spec & sstatus
#(t0/t1 = x5/x6 were saved by the loop above -- safe as temporaries)
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
#get trap handler addr from sscratch
csrr t2, sscratch
jalr t2
_kernel_trap_return:
#recover spec & sstatus first, before t0/t1 are restored by the loop below
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
#recover registers
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_REGISTER %n
.set n, n+1
.endr
#recover sp
addi sp, sp, 34*8
sret
|
weix2025/toy
| 5,330
|
deps/boringssl/linux-x86/crypto/test/trampoline-x86-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl abi_test_trampoline
.hidden abi_test_trampoline
.type abi_test_trampoline,@function
.align 16
abi_test_trampoline:
.L_abi_test_trampoline_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 24(%esp),%ecx
movl (%ecx),%esi
movl 4(%ecx),%edi
movl 8(%ecx),%ebx
movl 12(%ecx),%ebp
subl $44,%esp
movl 72(%esp),%eax
xorl %ecx,%ecx
.L000loop:
cmpl 76(%esp),%ecx
jae .L001loop_done
movl (%eax,%ecx,4),%edx
movl %edx,(%esp,%ecx,4)
addl $1,%ecx
jmp .L000loop
.L001loop_done:
call *64(%esp)
addl $44,%esp
movl 24(%esp),%ecx
movl %esi,(%ecx)
movl %edi,4(%ecx)
movl %ebx,8(%ecx)
movl %ebp,12(%ecx)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size abi_test_trampoline,.-.L_abi_test_trampoline_begin
.globl abi_test_get_and_clear_direction_flag
.hidden abi_test_get_and_clear_direction_flag
.type abi_test_get_and_clear_direction_flag,@function
.align 16
abi_test_get_and_clear_direction_flag:
.L_abi_test_get_and_clear_direction_flag_begin:
pushfl
popl %eax
andl $1024,%eax
shrl $10,%eax
cld
ret
.size abi_test_get_and_clear_direction_flag,.-.L_abi_test_get_and_clear_direction_flag_begin
.globl abi_test_set_direction_flag
.hidden abi_test_set_direction_flag
.type abi_test_set_direction_flag,@function
.align 16
abi_test_set_direction_flag:
.L_abi_test_set_direction_flag_begin:
std
ret
.size abi_test_set_direction_flag,.-.L_abi_test_set_direction_flag_begin
.globl abi_test_clobber_eax
.hidden abi_test_clobber_eax
.type abi_test_clobber_eax,@function
.align 16
abi_test_clobber_eax:
.L_abi_test_clobber_eax_begin:
xorl %eax,%eax
ret
.size abi_test_clobber_eax,.-.L_abi_test_clobber_eax_begin
.globl abi_test_clobber_ebx
.hidden abi_test_clobber_ebx
.type abi_test_clobber_ebx,@function
.align 16
abi_test_clobber_ebx:
.L_abi_test_clobber_ebx_begin:
xorl %ebx,%ebx
ret
.size abi_test_clobber_ebx,.-.L_abi_test_clobber_ebx_begin
.globl abi_test_clobber_ecx
.hidden abi_test_clobber_ecx
.type abi_test_clobber_ecx,@function
.align 16
abi_test_clobber_ecx:
.L_abi_test_clobber_ecx_begin:
xorl %ecx,%ecx
ret
.size abi_test_clobber_ecx,.-.L_abi_test_clobber_ecx_begin
.globl abi_test_clobber_edx
.hidden abi_test_clobber_edx
.type abi_test_clobber_edx,@function
.align 16
abi_test_clobber_edx:
.L_abi_test_clobber_edx_begin:
xorl %edx,%edx
ret
.size abi_test_clobber_edx,.-.L_abi_test_clobber_edx_begin
.globl abi_test_clobber_edi
.hidden abi_test_clobber_edi
.type abi_test_clobber_edi,@function
.align 16
abi_test_clobber_edi:
.L_abi_test_clobber_edi_begin:
xorl %edi,%edi
ret
.size abi_test_clobber_edi,.-.L_abi_test_clobber_edi_begin
.globl abi_test_clobber_esi
.hidden abi_test_clobber_esi
.type abi_test_clobber_esi,@function
.align 16
abi_test_clobber_esi:
.L_abi_test_clobber_esi_begin:
xorl %esi,%esi
ret
.size abi_test_clobber_esi,.-.L_abi_test_clobber_esi_begin
.globl abi_test_clobber_ebp
.hidden abi_test_clobber_ebp
.type abi_test_clobber_ebp,@function
.align 16
abi_test_clobber_ebp:
.L_abi_test_clobber_ebp_begin:
xorl %ebp,%ebp
ret
.size abi_test_clobber_ebp,.-.L_abi_test_clobber_ebp_begin
.globl abi_test_clobber_xmm0
.hidden abi_test_clobber_xmm0
.type abi_test_clobber_xmm0,@function
.align 16
abi_test_clobber_xmm0:
.L_abi_test_clobber_xmm0_begin:
pxor %xmm0,%xmm0
ret
.size abi_test_clobber_xmm0,.-.L_abi_test_clobber_xmm0_begin
.globl abi_test_clobber_xmm1
.hidden abi_test_clobber_xmm1
.type abi_test_clobber_xmm1,@function
.align 16
abi_test_clobber_xmm1:
.L_abi_test_clobber_xmm1_begin:
pxor %xmm1,%xmm1
ret
.size abi_test_clobber_xmm1,.-.L_abi_test_clobber_xmm1_begin
.globl abi_test_clobber_xmm2
.hidden abi_test_clobber_xmm2
.type abi_test_clobber_xmm2,@function
.align 16
abi_test_clobber_xmm2:
.L_abi_test_clobber_xmm2_begin:
pxor %xmm2,%xmm2
ret
.size abi_test_clobber_xmm2,.-.L_abi_test_clobber_xmm2_begin
.globl abi_test_clobber_xmm3
.hidden abi_test_clobber_xmm3
.type abi_test_clobber_xmm3,@function
.align 16
abi_test_clobber_xmm3:
.L_abi_test_clobber_xmm3_begin:
pxor %xmm3,%xmm3
ret
.size abi_test_clobber_xmm3,.-.L_abi_test_clobber_xmm3_begin
.globl abi_test_clobber_xmm4
.hidden abi_test_clobber_xmm4
.type abi_test_clobber_xmm4,@function
.align 16
abi_test_clobber_xmm4:
.L_abi_test_clobber_xmm4_begin:
pxor %xmm4,%xmm4
ret
.size abi_test_clobber_xmm4,.-.L_abi_test_clobber_xmm4_begin
.globl abi_test_clobber_xmm5
.hidden abi_test_clobber_xmm5
.type abi_test_clobber_xmm5,@function
.align 16
abi_test_clobber_xmm5:
.L_abi_test_clobber_xmm5_begin:
pxor %xmm5,%xmm5
ret
.size abi_test_clobber_xmm5,.-.L_abi_test_clobber_xmm5_begin
.globl abi_test_clobber_xmm6
.hidden abi_test_clobber_xmm6
.type abi_test_clobber_xmm6,@function
.align 16
abi_test_clobber_xmm6:
.L_abi_test_clobber_xmm6_begin:
pxor %xmm6,%xmm6
ret
.size abi_test_clobber_xmm6,.-.L_abi_test_clobber_xmm6_begin
.globl abi_test_clobber_xmm7
.hidden abi_test_clobber_xmm7
.type abi_test_clobber_xmm7,@function
.align 16
abi_test_clobber_xmm7:
.L_abi_test_clobber_xmm7_begin:
pxor %xmm7,%xmm7
ret
.size abi_test_clobber_xmm7,.-.L_abi_test_clobber_xmm7_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 16,212
|
deps/boringssl/linux-x86/crypto/fipsmodule/vpaes-x86-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.align 64
// Constant tables for vector-permutation AES (SSSE3 pshufb-based).
// The public entry points point %ebp at .L_vpaes_consts+0x30, so all
// table accesses in the cores use small signed displacements off %ebp.
// NOTE(review): exact per-table meanings come from the generating Perl
// script and are not restated here to avoid mislabeling.
.L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.long 2749646592,4273543773,1511898873,121693092
.long 3040248576,1103263732,2871565598,1608280554
.long 2236667136,2588920351,482954393,64377734
.long 3069987328,291237287,2117370568,3650299247
.long 533321216,3573750986,2572112006,1401264716
.long 1339849704,2721158661,548607111,3445553514
.long 2128193280,3054596040,2183486460,1257083700
.long 655635200,1165381986,3923443150,2344132524
.long 190078720,256924420,290342170,357187870
.long 1610966272,2263057382,4103205268,309794674
.long 2592527872,2233205587,1335446729,3402964816
.long 3973531904,3225098121,3002836325,1918774430
.long 3870401024,2102906079,2284471353,4117666579
.long 617007872,1021508343,366931923,691083277
.long 2528395776,3491914898,2968704004,1613121270
.long 3445188352,3247741094,844474987,4093578302
.long 651481088,1190302358,1689581232,574775300
.long 4289380608,206939853,2555985458,2489840491
.long 2130264064,327674451,3566485037,3349835193
.long 2470714624,316102159,3636825756,3393945945
// ASCII banner string, decodes to:
// "Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)"
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 64
.hidden _vpaes_preheat
.type _vpaes_preheat,@function
.align 16
// Internal helper: resolve the PIC base and preload shared masks.
// Adds the return address (on top of stack) to %ebp; callers set
// %ebp = .L_vpaes_consts+0x30 minus their pic label, so afterward %ebp
// holds the absolute address of the constant area.
// Out: %xmm7 = -48(%ebp), %xmm6 = -16(%ebp) (masks used by the cores).
_vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.size _vpaes_preheat,.-_vpaes_preheat
.hidden _vpaes_encrypt_core
.type _vpaes_encrypt_core,@function
.align 16
// Internal: encrypt one 16-byte block held in %xmm0.
// In:  %xmm0 = block, %edx -> key schedule (round count at 240(%edx)),
//      %ebp -> constant area, %xmm6/%xmm7 preloaded by _vpaes_preheat.
// Out: %xmm0 = encrypted block.
// Clobbers: %eax (round counter), %ebx, %ecx (rotation offset into the
// mangling tables, kept in {0,16,32,48} via "andl $48"), xmm1-xmm5, flags.
// The ".byte 102,15,56,0,..." sequences are hand-encoded SSSE3 pshufb
// instructions (66 0F 38 00 /r).
_vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
// One middle round per iteration; falls out via the "jnz" at enc_entry.
.L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
// Shared round entry: split state into low/high nibbles and run the
// pshufb-based S-box lookups; ZF from "subl $1,%eax" above decides
// whether another middle round follows.
.L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
// Last round: final S-box lookup, last round key, output byte permute.
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.hidden _vpaes_decrypt_core
.type _vpaes_decrypt_core,@function
.align 16
// Internal: decrypt one 16-byte block held in %xmm0.
// In:  %xmm0 = block, %edx -> (decryption) key schedule with round count
//      at 240(%edx), %ebp -> constant area, %xmm6/%xmm7 preloaded.
// Out: %xmm0 = decrypted block.
// Clobbers: %eax, %ebx (decryption table base = 608(%ebp)), %ecx (ends up
// pointing at the output permutation entry), xmm1-xmm5, flags.
_vpaes_decrypt_core:
leal 608(%ebp),%ebx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa -64(%ebx),%xmm2
pandn %xmm0,%xmm1
movl %eax,%ecx
psrld $4,%xmm1
movdqu (%edx),%xmm5
shll $4,%ecx
pand %xmm6,%xmm0
.byte 102,15,56,0,208
movdqa -48(%ebx),%xmm0
xorl $48,%ecx
.byte 102,15,56,0,193
andl $48,%ecx
pxor %xmm5,%xmm2
movdqa 176(%ebp),%xmm5
pxor %xmm2,%xmm0
addl $16,%edx
leal -352(%ebx,%ecx,1),%ecx
jmp .L002dec_entry
.align 16
// One inverse middle round per iteration (inverse MixColumns folded into
// four table lookups, plus a 12-byte palignr rotation of the mc matrix).
.L003dec_loop:
movdqa -32(%ebx),%xmm4
movdqa -16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa (%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 32(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 48(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 64(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 80(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
addl $16,%edx
.byte 102,15,58,15,237,12
pxor %xmm1,%xmm0
subl $1,%eax
// Shared round entry: nibble split + inverse S-box lookups; the "jnz"
// below consumes ZF from "subl $1,%eax".
.L002dec_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
psrld $4,%xmm1
.byte 102,15,56,0,208
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm7,%xmm2
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm0
pxor %xmm1,%xmm3
jnz .L003dec_loop
// Last round: final lookup, last round key, output permutation at (%ecx).
movdqa 96(%ebx),%xmm4
.byte 102,15,56,0,226
pxor %xmm0,%xmm4
movdqa 112(%ebx),%xmm0
movdqa (%ecx),%xmm2
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
.byte 102,15,56,0,194
ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
.hidden _vpaes_schedule_core
.type _vpaes_schedule_core,@function
.align 16
// Internal: AES key-schedule driver shared by set_encrypt_key and
// set_decrypt_key.
// In:  %esi -> user key, %edx -> output schedule, %eax = key bits
//      (128/192/256), %ecx = rotation offset, %edi = 0 for encryption /
//      nonzero for decryption, (%esp) = PIC displacement for %ebp
//      (resolved by the leading "addl (%esp),%ebp", as in _vpaes_preheat).
// Clears xmm0-xmm7 on exit (key material hygiene).
_vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz .L004schedule_am_decrypting
// Encryption: store the (transformed) first round key as-is.
movdqu %xmm0,(%edx)
jmp .L005schedule_go
.L004schedule_am_decrypting:
// Decryption: store a byte-permuted copy of the raw key instead.
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
// Dispatch on key size (eax = bits): >192 -> 256, ==192 -> 192, else 128.
.L005schedule_go:
cmpl $192,%eax
ja .L006schedule_256
je .L007schedule_192
.L008schedule_128:
// AES-128: 10 rounds of schedule_round + mangle.
movl $10,%eax
.L009loop_schedule_128:
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
jmp .L009loop_schedule_128
.align 16
.L007schedule_192:
// AES-192: keys generated in groups of three round keys per iteration.
movdqu 8(%esi),%xmm0
call _vpaes_schedule_transform
movdqa %xmm0,%xmm6
pxor %xmm4,%xmm4
movhlps %xmm4,%xmm6
movl $4,%eax
.L011loop_schedule_192:
call _vpaes_schedule_round
.byte 102,15,58,15,198,8
call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
call _vpaes_schedule_mangle
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
jmp .L011loop_schedule_192
.align 16
.L006schedule_256:
// AES-256: alternates a full round with a "low" round on the second
// key half (via the mid-function entry .L_vpaes_schedule_low_round).
movdqu 16(%esi),%xmm0
call _vpaes_schedule_transform
movl $7,%eax
.L012loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call .L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp .L012loop_schedule_256
.align 16
.L010schedule_mangle_last:
// Emit the final round key (extra output transform + permutation for
// encryption schedules), then wipe all xmm registers.
leal 384(%ebp),%ebx
testl %edi,%edi
jnz .L013schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
.L013schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.hidden _vpaes_schedule_192_smear
.type _vpaes_schedule_192_smear,@function
.align 16
// Internal (AES-192 only): "smear" step mixing words of %xmm6/%xmm7 into
// the low half of the next key block.
// In/out: %xmm6 (partial key), %xmm7 (previous key); %xmm0 gets a copy of
// the smeared %xmm6; %xmm1 is zeroed as scratch.
_vpaes_schedule_192_smear:
pshufd $128,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0
pxor %xmm1,%xmm6
pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
movhlps %xmm1,%xmm6
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
.hidden _vpaes_schedule_round
.type _vpaes_schedule_round,@function
.align 16
// Internal: one round of the AES key schedule.
// Full entry (_vpaes_schedule_round) additionally rotates in the round
// constant kept at 8(%esp) and word-rotates %xmm0; the mid-function entry
// .L_vpaes_schedule_low_round (called directly by the 256-bit path) skips
// that prologue.
// In/out: %xmm7 = current key, %xmm0 = schedule state; clobbers
// xmm1-xmm5 and reads S-box constants off %ebp.
_vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
.L_vpaes_schedule_low_round:
// Smear %xmm7 leftward (slide-and-xor by 4 then 8 bytes), add fixed key.
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
// Apply the pshufb-based S-box (subbytes) to %xmm0.
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
// Combine with the smeared key; result becomes the new %xmm7.
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.hidden _vpaes_schedule_transform
.type _vpaes_schedule_transform,@function
.align 16
// Internal: linear input/output transform via two nibble table lookups.
// In:  %xmm0 = value, %ebx -> 2x16-byte lookup tables, %ebp -> constants.
// Out: %xmm0 transformed. Clobbers %xmm1, %xmm2.
_vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.hidden _vpaes_schedule_mangle
.type _vpaes_schedule_mangle,@function
.align 16
// Internal: write out the current round key (from %xmm0, via scratch
// %xmm4/%xmm3), "mangled" into the form the encrypt/decrypt cores expect.
// %edi selects direction: 0 = encrypting (forward smear, %edx advances by
// +16), nonzero = decrypting (inverse-mixcolumns style table chain off
// 416(%ebp), %edx moves by -16). %ecx cycles the output byte permutation
// through {0,16,32,48}.
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz .L014schedule_mangle_dec
// Encrypting: add fixed key, then triple smear via pshufb %xmm5.
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .L015schedule_mangle_both
.align 16
.L014schedule_mangle_dec:
// Decrypting: nibble-split %xmm4 and run the 8-table chain at 416(%ebp).
movdqa -16(%ebp),%xmm2
leal 416(%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
.L015schedule_mangle_both:
// Apply the rotating output permutation and store the round key.
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
// int vpaes_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *out)
// (signature presumed from the standard BoringSSL interface; the code reads
// three stack arguments: key ptr, bit count, schedule ptr.)
// Builds the vpaes encryption key schedule; always returns 0 (%eax).
// Switches to a 16-byte-aligned scratch stack for the xmm spill slots used
// by _vpaes_schedule_core; round count stored at 240(out) = bits/32 + 5.
vpaes_set_encrypt_key:
.L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only: record that this entry point was executed.
pushl %ebx
pushl %edx
call .L016pic
.L016pic:
popl %ebx
leal BORINGSSL_function_hit+5-.L016pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal .L_vpaes_consts+0x30-.L017pic_point,%ebp
call _vpaes_schedule_core
.L017pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
.globl vpaes_set_decrypt_key
.hidden vpaes_set_decrypt_key
.type vpaes_set_decrypt_key,@function
.align 16
// int vpaes_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *out)
// (signature presumed from the standard BoringSSL interface.)
// Builds the vpaes decryption key schedule (schedule written back-to-front:
// %edx is advanced to the last round key before calling the core, and
// %edi=1 selects the decrypting paths). Always returns 0.
vpaes_set_decrypt_key:
.L_vpaes_set_decrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
shll $4,%ebx
leal 16(%edx,%ebx,1),%edx
movl $1,%edi
movl %eax,%ecx
shrl $1,%ecx
andl $32,%ecx
xorl $32,%ecx
leal .L_vpaes_consts+0x30-.L018pic_point,%ebp
call _vpaes_schedule_core
.L018pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
// void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key)
// (signature presumed from the standard BoringSSL interface.)
// Encrypts one 16-byte block via _vpaes_encrypt_core on an aligned
// scratch stack; unaligned loads/stores (movdqu) for in/out.
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only: record that this entry point was executed.
pushl %ebx
pushl %edx
call .L019pic
.L019pic:
popl %ebx
leal BORINGSSL_function_hit+4-.L019pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal .L_vpaes_consts+0x30-.L020pic_point,%ebp
call _vpaes_preheat
.L020pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
.globl vpaes_decrypt
.hidden vpaes_decrypt
.type vpaes_decrypt,@function
.align 16
// void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key)
// (signature presumed from the standard BoringSSL interface.)
// Decrypts one 16-byte block via _vpaes_decrypt_core; mirror of
// vpaes_encrypt above.
vpaes_decrypt:
.L_vpaes_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
leal .L_vpaes_consts+0x30-.L021pic_point,%ebp
call _vpaes_preheat
.L021pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_decrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_decrypt,.-.L_vpaes_decrypt_begin
.globl vpaes_cbc_encrypt
.hidden vpaes_cbc_encrypt
.type vpaes_cbc_encrypt,@function
.align 16
// void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
//                        const AES_KEY *key, uint8_t *ivec, int enc)
// (signature presumed from the standard BoringSSL interface; the code
// reads six stack arguments in that order.)
// CBC mode over whole 16-byte blocks; len < 16 returns without work
// ("jc .L022cbc_abort" after "subl $16"). The IV is carried in %xmm1 and
// written back to ivec at the end. Scratch-stack layout: 0(%esp)=out-in
// delta, 4(%esp)=key, 8(%esp)=ivec, 48(%esp)=saved %esp.
vpaes_cbc_encrypt:
.L_vpaes_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
subl $16,%eax
jc .L022cbc_abort
leal -56(%esp),%ebx
movl 36(%esp),%ebp
andl $-16,%ebx
movl 40(%esp),%ecx
xchgl %esp,%ebx
movdqu (%ebp),%xmm1
subl %esi,%edi
movl %ebx,48(%esp)
movl %edi,(%esp)
movl %edx,4(%esp)
movl %ebp,8(%esp)
movl %eax,%edi
leal .L_vpaes_consts+0x30-.L023pic_point,%ebp
call _vpaes_preheat
.L023pic_point:
cmpl $0,%ecx
je .L024cbc_dec_loop
jmp .L025cbc_enc_loop
.align 16
// Encrypt: C_i = E(P_i ^ C_{i-1}); chaining value stays in %xmm1.
.L025cbc_enc_loop:
movdqu (%esi),%xmm0
pxor %xmm1,%xmm0
call _vpaes_encrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
movdqa %xmm0,%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc .L025cbc_enc_loop
jmp .L026cbc_done
.align 16
// Decrypt: P_i = D(C_i) ^ C_{i-1}; ciphertext saved at 32(%esp) so
// in-place operation is safe.
.L024cbc_dec_loop:
movdqu (%esi),%xmm0
movdqa %xmm1,16(%esp)
movdqa %xmm0,32(%esp)
call _vpaes_decrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
pxor 16(%esp),%xmm0
movdqa 32(%esp),%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc .L024cbc_dec_loop
.L026cbc_done:
// Write the final chaining value back to the caller's IV buffer.
movl 8(%esp),%ebx
movl 48(%esp),%esp
movdqu %xmm1,(%ebx)
.L022cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---------------------------------------------------------------------------
// Extraction artifact (file boundary). The content below comes from:
//   deps/boringssl/linux-x86/crypto/fipsmodule/md5-586-linux.S
//   (repo: weix2025/toy, 11,526 bytes)
// ---------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl md5_block_asm_data_order
.hidden md5_block_asm_data_order
.type md5_block_asm_data_order,@function
.align 16
// void md5_block_asm_data_order(MD5_CTX *ctx, const void *data, size_t num)
// (signature presumed from the standard OpenSSL/BoringSSL interface; the
// code reads three stack arguments: state pointer, input pointer, and a
// block count that is scaled by 64 -- "shll $6,%ecx".)
// Fully unrolled MD5 compression: per 64-byte block, four rounds of 16
// steps each (F, G, H, I mixing, with the round constants from RFC 1321
// as decimal immediates in the leal instructions), then the four 32-bit
// state words are accumulated back into the context.
// State is carried in %eax/%ebx/%ecx/%edx (A/B/C/D); %ebp holds the
// current message word, %edi the round-function scratch; the end-of-input
// pointer is kept on the stack at (%esp).
md5_block_asm_data_order:
.L_md5_block_asm_data_order_begin:
pushl %esi
pushl %edi
movl 12(%esp),%edi
movl 16(%esp),%esi
movl 20(%esp),%ecx
pushl %ebp
shll $6,%ecx
pushl %ebx
addl %esi,%ecx
subl $64,%ecx
movl (%edi),%eax
pushl %ecx
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
// Main loop: one iteration per 64-byte block.
// Round 1 (F = (B & C) | (~B & D)), constants 0xd76aa478...0x49b40821.
.L000start:
movl %ecx,%edi
movl (%esi),%ebp
xorl %edx,%edi
andl %ebx,%edi
leal 3614090360(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 4(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 3905402710(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 8(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 606105819(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 12(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 3250441966(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 16(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 4118548399(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 20(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 1200080426(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 24(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 2821735955(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 28(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 4249261313(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 32(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 1770035416(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 36(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 2336552879(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 40(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 4294925233(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 44(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 2304563134(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 48(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 1804603682(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 52(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 4254626195(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 56(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 2792965006(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 60(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 1236535329(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 4(%esi),%ebp
addl %ecx,%ebx
// Round 2 (G = (B & D) | (C & ~D)), constants 0xf61e2562...0x8d2a4c8a.
leal 4129170786(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 24(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 3225465664(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 44(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 643717713(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl (%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 3921069994(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 3593408605(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 40(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 38016083(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 60(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 3634488961(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 16(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 3889429448(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 36(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 568446438(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 56(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 3275163606(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 12(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 4107603335(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 32(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 1163531501(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 52(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 2850285829(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 8(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 4243563512(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 28(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 1735328473(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 48(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 2368359562(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
// Round 3 (H = B ^ C ^ D), constants 0xfffa3942...0xc4ac5665.
xorl %edx,%edi
xorl %ebx,%edi
leal 4294588738(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 32(%esi),%ebp
movl %ebx,%edi
leal 2272392833(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 44(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 1839030562(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 56(%esi),%ebp
movl %edx,%edi
leal 4259657740(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 4(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 2763975236(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 16(%esi),%ebp
movl %ebx,%edi
leal 1272893353(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 28(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 4139469664(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 40(%esi),%ebp
movl %edx,%edi
leal 3200236656(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 52(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 681279174(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl (%esi),%ebp
movl %ebx,%edi
leal 3936430074(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 12(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 3572445317(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 24(%esi),%ebp
movl %edx,%edi
leal 76029189(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 36(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 3654602809(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 48(%esi),%ebp
movl %ebx,%edi
leal 3873151461(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 60(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 530742520(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 8(%esi),%ebp
movl %edx,%edi
leal 3299628645(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl (%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $23,%ebx
addl %ecx,%ebx
// Round 4 (I = C ^ (B | ~D), built from %edi = -1 then xor/or),
// constants 0xf4292244...0xeb86d391.
xorl %edx,%edi
orl %ebx,%edi
leal 4096336452(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 28(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 1126891415(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 56(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 2878612391(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 20(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 4237533241(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 48(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 1700485571(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 12(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 2399980690(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 40(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 4293915773(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 4(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 2240044497(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 32(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 1873313359(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 60(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 4264355552(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 24(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 2734768916(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 52(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 1309151649(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 16(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 4149444226(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 44(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 3174756917(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 8(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 718787259(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 36(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 3951481745(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 24(%esp),%ebp
addl %edi,%ebx
addl $64,%esi
roll $21,%ebx
// Accumulate this block's A/B/C/D back into the context (*ctx += state),
// then loop while the input pointer is below the saved end pointer.
movl (%ebp),%edi
addl %ecx,%ebx
addl %edi,%eax
movl 4(%ebp),%edi
addl %edi,%ebx
movl 8(%ebp),%edi
addl %edi,%ecx
movl 12(%ebp),%edi
addl %edi,%edx
movl %eax,(%ebp)
movl %ebx,4(%ebp)
movl (%esp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
cmpl %esi,%edi
jae .L000start
// Restore callee-saved registers (first pop discards the end pointer).
popl %eax
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---------------------------------------------------------------------------
// Extraction artifact (file boundary). The content below comes from:
//   deps/boringssl/linux-x86/crypto/fipsmodule/aesni-x86-linux.S
//   (repo: weix2025/toy, 51,296 bytes)
// ---------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.globl aes_hw_encrypt
.hidden aes_hw_encrypt
.type aes_hw_encrypt,@function
.align 16
// void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key)
// (signature presumed from the standard BoringSSL AES-NI interface.)
// Single-block AES-NI encryption: the ".byte 102,15,56,220,209" lines are
// hand-encoded "aesenc %xmm1,%xmm2", and "...,221,209" is aesenclast.
// Round count read from 240(key) into %ecx; key registers are wiped
// (pxor) before returning.
aes_hw_encrypt:
.L_aes_hw_encrypt_begin:
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only: record that this entry point was executed.
pushl %ebx
pushl %edx
call .L000pic
.L000pic:
popl %ebx
leal BORINGSSL_function_hit+1-.L000pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L001enc1_loop_1:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L001enc1_loop_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
.size aes_hw_encrypt,.-.L_aes_hw_encrypt_begin
.globl aes_hw_decrypt
.hidden aes_hw_decrypt
.type aes_hw_decrypt,@function
.align 16
// Single-block AES decryption via AES-NI; mirror of aes_hw_encrypt using
// aesdec/aesdeclast. cdecl stack args: 4(%esp)=in, 8(%esp)=out, 12(%esp)=key.
aes_hw_decrypt:
.L_aes_hw_decrypt_begin:
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
// One inverse round per iteration; ecx counts rounds remaining.
.L002dec1_loop_2:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L002dec1_loop_2
.byte 102,15,56,223,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
.size aes_hw_decrypt,.-.L_aes_hw_decrypt_begin
.hidden _aesni_encrypt2
.type _aesni_encrypt2,@function
.align 16
// Encrypt two blocks (xmm2, xmm3) in parallel to hide aesenc latency.
// In: edx = key schedule, ecx = round count. Clobbers ecx, edx, xmm0, xmm1.
// The loop interleaves two key loads per iteration (xmm1, then xmm0).
_aesni_encrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
// Point edx past the schedule and count ecx up toward zero so the index
// (%edx,%ecx,1) walks the round keys without an extra counter register.
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L003enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L003enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.size _aesni_encrypt2,.-_aesni_encrypt2
.hidden _aesni_decrypt2
.type _aesni_decrypt2,@function
.align 16
// Decrypt two blocks (xmm2, xmm3) in parallel; mirror of _aesni_encrypt2
// using aesdec/aesdeclast. In: edx = key schedule, ecx = round count.
_aesni_decrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L004dec2_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L004dec2_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,223,208
.byte 102,15,56,223,216
ret
.size _aesni_decrypt2,.-_aesni_decrypt2
.hidden _aesni_encrypt3
.type _aesni_encrypt3,@function
.align 16
// Encrypt three blocks (xmm2-xmm4) in parallel.
// In: edx = key schedule, ecx = round count. Clobbers ecx, edx, xmm0, xmm1.
_aesni_encrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L005enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L005enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.size _aesni_encrypt3,.-_aesni_encrypt3
.hidden _aesni_decrypt3
.type _aesni_decrypt3,@function
.align 16
// Decrypt three blocks (xmm2-xmm4) in parallel; mirror of _aesni_encrypt3.
// In: edx = key schedule, ecx = round count.
_aesni_decrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L006dec3_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L006dec3_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
ret
.size _aesni_decrypt3,.-_aesni_decrypt3
.hidden _aesni_encrypt4
.type _aesni_encrypt4,@function
.align 16
// Encrypt four blocks (xmm2-xmm5) in parallel.
// In: edx = key schedule, ecx = round count. Clobbers ecx, edx, xmm0, xmm1.
_aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
// 4-byte nop (nopl 0(%eax)) — padding for loop alignment.
.byte 15,31,64,0
addl $16,%ecx
.L007enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L007enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.size _aesni_encrypt4,.-_aesni_encrypt4
.hidden _aesni_decrypt4
.type _aesni_decrypt4,@function
.align 16
// Decrypt four blocks (xmm2-xmm5) in parallel; mirror of _aesni_encrypt4.
// In: edx = key schedule, ecx = round count.
_aesni_decrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
// 4-byte nop (nopl 0(%eax)) — padding for loop alignment.
.byte 15,31,64,0
addl $16,%ecx
.L008dec4_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L008dec4_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
ret
.size _aesni_decrypt4,.-_aesni_decrypt4
.hidden _aesni_encrypt6
.type _aesni_encrypt6,@function
.align 16
// Encrypt six blocks (xmm2-xmm7) in parallel.
// In: edx = key schedule, ecx = round count. Clobbers ecx, edx, xmm0, xmm1.
// Also exposes .L_aesni_encrypt6_enter, an alternate entry used by callers
// (CTR/XTS below) that have already performed round 0 and the first aesenc
// of each block themselves.
_aesni_encrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
// Start rounds for the first blocks while still whitening the later ones,
// overlapping aesenc latency with the pxor/load work.
.byte 102,15,56,220,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L009_aesni_encrypt6_inner
.align 16
.L010enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.L009_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.L_aesni_encrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L010enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.size _aesni_encrypt6,.-_aesni_encrypt6
.hidden _aesni_decrypt6
.type _aesni_decrypt6,@function
.align 16
// Decrypt six blocks (xmm2-xmm7) in parallel; mirror of _aesni_encrypt6.
// In: edx = key schedule, ecx = round count. Exposes the alternate entry
// .L_aesni_decrypt6_enter for callers that pre-start the first round.
_aesni_decrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,222,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,222,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,222,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L011_aesni_decrypt6_inner
.align 16
.L012dec6_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.L011_aesni_decrypt6_inner:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.L_aesni_decrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L012dec6_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
.byte 102,15,56,223,240
.byte 102,15,56,223,248
ret
.size _aesni_decrypt6,.-_aesni_decrypt6
.globl aes_hw_ecb_encrypt
.hidden aes_hw_ecb_encrypt
.type aes_hw_ecb_encrypt,@function
.align 16
// ECB-mode bulk encryption/decryption.
// Stack args after the four pushes: 20(%esp)=in, 24(%esp)=out, 28(%esp)=len,
// 32(%esp)=key, 36(%esp)=enc flag (nonzero = encrypt, zero = decrypt).
// len is rounded down to a multiple of 16; the main loops process six
// blocks per iteration via _aesni_{en,de}crypt6, with 1-5 block tails.
// Register roles in the loops: esi=in, edi=out, eax=bytes left,
// ebp=saved key ptr, ebx=saved round count (edx/ecx are clobbered by the
// block helpers and restored from ebp/ebx each iteration).
aes_hw_ecb_encrypt:
.L_aes_hw_ecb_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
andl $-16,%eax
jz .L013ecb_ret
movl 240(%edx),%ecx
testl %ebx,%ebx
jz .L014ecb_decrypt
// ---- encrypt path ----
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb .L015ecb_enc_tail
// Preload the first six blocks; stores of iteration N are interleaved
// with loads of iteration N+1 inside the loop below.
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp .L016ecb_enc_loop6_enter
.align 16
.L017ecb_enc_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
.L016ecb_enc_loop6_enter:
call _aesni_encrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc .L017ecb_enc_loop6
// Flush the final six blocks of the unrolled loop.
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz .L013ecb_ret
// Tail: dispatch on remaining length (1-5 blocks).
.L015ecb_enc_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb .L018ecb_enc_one
movups 16(%esi),%xmm3
je .L019ecb_enc_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb .L020ecb_enc_three
movups 48(%esi),%xmm5
je .L021ecb_enc_four
// Five blocks: run the 6-wide helper with xmm7 zeroed as a dummy lane.
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_encrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L013ecb_ret
.align 16
.L018ecb_enc_one:
// Inline single-block encryption (same pattern as aes_hw_encrypt).
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L022enc1_loop_3:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L022enc1_loop_3
.byte 102,15,56,221,209
movups %xmm2,(%edi)
jmp .L013ecb_ret
.align 16
.L019ecb_enc_two:
call _aesni_encrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L013ecb_ret
.align 16
.L020ecb_enc_three:
call _aesni_encrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L013ecb_ret
.align 16
.L021ecb_enc_four:
call _aesni_encrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
jmp .L013ecb_ret
.align 16
// ---- decrypt path (structurally identical, aesdec helpers) ----
.L014ecb_decrypt:
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb .L023ecb_dec_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp .L024ecb_dec_loop6_enter
.align 16
.L025ecb_dec_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
.L024ecb_dec_loop6_enter:
call _aesni_decrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc .L025ecb_dec_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz .L013ecb_ret
.L023ecb_dec_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb .L026ecb_dec_one
movups 16(%esi),%xmm3
je .L027ecb_dec_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb .L028ecb_dec_three
movups 48(%esi),%xmm5
je .L029ecb_dec_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L013ecb_ret
.align 16
.L026ecb_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L030dec1_loop_4:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L030dec1_loop_4
.byte 102,15,56,223,209
movups %xmm2,(%edi)
jmp .L013ecb_ret
.align 16
.L027ecb_dec_two:
call _aesni_decrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L013ecb_ret
.align 16
.L028ecb_dec_three:
call _aesni_decrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L013ecb_ret
.align 16
.L029ecb_dec_four:
call _aesni_decrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
// Common exit: scrub all xmm registers before returning to the caller.
.L013ecb_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ecb_encrypt,.-.L_aes_hw_ecb_encrypt_begin
.globl aes_hw_ccm64_encrypt_blocks
.hidden aes_hw_ccm64_encrypt_blocks
.type aes_hw_ccm64_encrypt_blocks,@function
.align 16
// CCM (64-bit counter) encryption.
// Stack args after the four pushes: 20(%esp)=in, 24(%esp)=out,
// 28(%esp)=block count, 32(%esp)=key, 36(%esp)=counter block ptr,
// 40(%esp)=CMAC block ptr — TODO confirm arg names against the C prototype.
// Per iteration it encrypts the counter (xmm2 <- xmm7) and folds the
// plaintext into the CMAC state (xmm3) by running both blocks through the
// cipher in parallel, then XORs the keystream into the plaintext.
aes_hw_ccm64_encrypt_blocks:
.L_aes_hw_ccm64_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
// Carve out an aligned scratch frame; original esp saved at 48(%esp).
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
// (%esp) = 0x0c0d0e0f 0x08090a0b 0x04050607 0x00010203: the byte-reverse
// pshufb mask used to keep the counter big-endian.
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
// 16(%esp) = {1,0,0,0}: 64-bit counter increment for paddq below.
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
shll $4,%ecx
movl $16,%ebx
leal (%edx),%ebp
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
leal 32(%edx,%ecx,1),%edx
subl %ecx,%ebx
.byte 102,15,56,0,253
.L031ccm64_enc_outer:
// Round 0 for both lanes: counter in xmm2, CMAC^plaintext in xmm3.
movups (%ebp),%xmm0
movl %ebx,%ecx
movups (%esi),%xmm6
xorps %xmm0,%xmm2
movups 16(%ebp),%xmm1
xorps %xmm6,%xmm0
xorps %xmm0,%xmm3
movups 32(%ebp),%xmm0
.L032ccm64_enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L032ccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
// Bump the counter and count down the remaining blocks while the last
// round completes.
paddq 16(%esp),%xmm7
decl %eax
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
.byte 102,15,56,0,213
leal 16(%edi),%edi
jnz .L031ccm64_enc_outer
// Restore caller stack, write the final CMAC out, scrub xmm state.
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ccm64_encrypt_blocks,.-.L_aes_hw_ccm64_encrypt_blocks_begin
.globl aes_hw_ccm64_decrypt_blocks
.hidden aes_hw_ccm64_decrypt_blocks
.type aes_hw_ccm64_decrypt_blocks,@function
.align 16
// CCM (64-bit counter) decryption.
// Same stack-argument layout as aes_hw_ccm64_encrypt_blocks.
// Differs from the encrypt path because the CMAC must absorb the
// *plaintext*: the first counter block is encrypted alone up front, and in
// the loop each decrypted block is XORed into the CMAC lane (xmm3) before
// the next counter/CMAC pair is run through the cipher.
aes_hw_ccm64_decrypt_blocks:
.L_aes_hw_ccm64_decrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
// Aligned scratch frame; original esp saved at 48(%esp).
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
// Byte-reverse pshufb mask at (%esp), counter increment {1,0,0,0} at 16(%esp).
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
movl %edx,%ebp
movl %ecx,%ebx
.byte 102,15,56,0,253
// Encrypt the first counter block (single-block loop) to prime the
// keystream before entering the outer loop.
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L033enc1_loop_5:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L033enc1_loop_5
.byte 102,15,56,221,209
shll $4,%ebx
movl $16,%ecx
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
leal 16(%esi),%esi
subl %ebx,%ecx
leal 32(%ebp,%ebx,1),%edx
movl %ecx,%ebx
jmp .L034ccm64_dec_outer
.align 16
.L034ccm64_dec_outer:
// Decrypt: plaintext = ciphertext ^ keystream; write it out, then feed it
// into the CMAC lane on the next pass.
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
leal 16(%edi),%edi
.byte 102,15,56,0,213
subl $1,%eax
jz .L035ccm64_dec_break
movups (%ebp),%xmm0
movl %ebx,%ecx
movups 16(%ebp),%xmm1
xorps %xmm0,%xmm6
xorps %xmm0,%xmm2
xorps %xmm6,%xmm3
movups 32(%ebp),%xmm0
.L036ccm64_dec2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L036ccm64_dec2_loop
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
jmp .L034ccm64_dec_outer
.align 16
.L035ccm64_dec_break:
// Fold the last plaintext block into the CMAC with one more single-block
// encryption of (CMAC ^ plaintext).
movl 240(%ebp),%ecx
movl %ebp,%edx
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm6
leal 32(%edx),%edx
xorps %xmm6,%xmm3
.L037enc1_loop_6:
.byte 102,15,56,220,217
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L037enc1_loop_6
.byte 102,15,56,221,217
// Restore caller stack, store the CMAC, scrub xmm state.
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ccm64_decrypt_blocks,.-.L_aes_hw_ccm64_decrypt_blocks_begin
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
// CTR mode with a 32-bit big-endian counter.
// Stack args after the four pushes: 20(%esp)=in, 24(%esp)=out,
// 28(%esp)=block count, 32(%esp)=key, 36(%esp)=ivec (counter block).
// Main loop generates six counter blocks per iteration (counters are kept
// byte-swapped on the stack and bumped with paddd), encrypts them with the
// 6-wide helper, and XORs the keystream into the input.
aes_hw_ctr32_encrypt_blocks:
.L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
// Test-only: mark the AES-NI CTR path as hit (PIC call/pop trick).
pushl %ebx
pushl %edx
call .L038pic
.L038pic:
popl %ebx
leal BORINGSSL_function_hit+0-.L038pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
// Aligned scratch frame; original esp saved at 80(%esp).
movl %esp,%ebp
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
je .L039ctr32_one_shortcut
movdqu (%ebx),%xmm7
// (%esp) = byte-reverse pshufb mask (0x0c0d0e0f 0x08090a0b ...).
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
// 16(%esp) = {6,6,6,0}: per-iteration counter increment for three lanes.
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
// pextrd $3,%xmm7,%ebx / pinsrd $3,%ebp,%xmm7: pull the 32-bit counter
// word out of the IV so it can be incremented in integer registers.
.byte 102,15,58,22,251,3
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
// Build counters n..n+5 across xmm0/xmm1 dwords via pinsrd, then
// byte-swap them back with pshufb.
.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0
incl %ebx
.byte 102,15,58,34,195,1
incl %ebp
.byte 102,15,58,34,205,1
incl %ebx
.byte 102,15,58,34,195,2
incl %ebp
.byte 102,15,58,34,205,2
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
movdqu (%edx),%xmm6
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
pshufd $192,%xmm0,%xmm2
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb .L040ctr32_tail
// Stash IV ^ round-key0 at 32(%esp) so round 0 is pre-applied per lane.
pxor %xmm6,%xmm7
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)
movl %edx,%ebp
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp .L041ctr32_loop6
.align 16
.L041ctr32_loop6:
// Expand six counter blocks (xmm2-xmm7) from the saved dword counters,
// apply round 0, start the first round, then finish via the 6-wide helper.
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217
movups 32(%ebp),%xmm0
movl %ebx,%ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
// XOR keystream into the next 96 bytes of input and advance the counters
// by 6 (paddd with 16(%esp)) for the following iteration.
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc .L041ctr32_loop6
addl $6,%eax
jz .L042ctr32_ret
// Undo the round-0 pre-whitening for the tail path.
movdqu (%ebp),%xmm7
movl %ebp,%edx
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
// Tail: 1-5 remaining blocks; counters ORed with the IV's fixed words.
.L040ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
jb .L043ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je .L044ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb .L045ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je .L046ctr32_four
por %xmm7,%xmm6
call _aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L042ctr32_ret
.align 16
.L039ctr32_one_shortcut:
// Exactly one block: encrypt the IV directly, no counter bookkeeping.
movups (%ebx),%xmm2
movl 240(%edx),%ecx
.L043ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L047enc1_loop_7:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L047enc1_loop_7
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp .L042ctr32_ret
.align 16
.L044ctr32_two:
call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L042ctr32_ret
.align 16
.L045ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L042ctr32_ret
.align 16
.L046ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
// Exit: scrub xmm registers and the key-material scratch slots, then
// restore the caller's esp.
.L042ctr32_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin
.globl aes_hw_xts_encrypt
.hidden aes_hw_xts_encrypt
.type aes_hw_xts_encrypt,@function
.align 16
// XTS-mode encryption.
// Stack args after the four pushes: 20(%esp)=in, 24(%esp)=out,
// 28(%esp)=len, 32(%esp)=key1 (data key), 36(%esp)=key2 (tweak key),
// 40(%esp)=iv (tweak seed) — TODO confirm arg names against the C prototype.
// The function first encrypts the IV with key2 to produce the initial
// tweak (xmm1), then processes six blocks per iteration, doubling the
// tweak in GF(2^128) each block: the pshufd $19 / pcmpgtd / pand / paddq /
// pxor sequence multiplies by x with the 0x87 reduction polynomial
// (movl $135,96(%esp)). A trailing partial block is handled with
// ciphertext stealing (.L059xts_enc_steal).
aes_hw_xts_encrypt:
.L_aes_hw_xts_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
// Encrypt the IV under key2 (single-block loop) to get the first tweak.
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L048enc1_loop_8:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L048enc1_loop_8
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
// Aligned scratch frame; 96(%esp)={0x87,0,1,0} tweak-reduction constant,
// 112(%esp)=original len (for the stolen tail), 116(%esp)=saved esp.
movl %esp,%ebp
subl $120,%esp
movl 240(%edx),%ecx
andl $-16,%esp
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
movl %edx,%ebp
movl %ecx,%ebx
subl $96,%eax
jc .L049xts_enc_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp .L050xts_enc_loop6
.align 16
.L050xts_enc_loop6:
// Generate six consecutive tweaks into (%esp)..64(%esp) and xmm7, each
// by one GF(2^128) doubling of xmm1 (carry detected via pcmpgtd).
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
// Load six plaintext blocks, apply round-key0 and the per-block tweak,
// start the first round, then finish via the 6-wide helper.
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,220,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,220,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
// Post-whiten each block with its tweak, store, then double the tweak
// once more for the next iteration.
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc .L050xts_enc_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
// Tail: 1-5 whole blocks; tweaks kept in xmm5/xmm6/xmm7 and stack slots.
.L049xts_enc_short:
addl $96,%eax
jz .L051xts_enc_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb .L052xts_enc_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je .L053xts_enc_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb .L054xts_enc_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je .L055xts_enc_four
// Five blocks via the 6-wide helper (xmm7 carries the fifth tweak).
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call _aesni_encrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp .L056xts_enc_done
.align 16
.L052xts_enc_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L057enc1_loop_9:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L057enc1_loop_9
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp .L056xts_enc_done
.align 16
.L053xts_enc_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call _aesni_encrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L056xts_enc_done
.align 16
.L054xts_enc_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call _aesni_encrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp .L056xts_enc_done
.align 16
.L055xts_enc_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call _aesni_encrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L056xts_enc_done
.align 16
.L051xts_enc_done6x:
// len was a multiple of 96: only check for a partial (stolen) tail.
movl 112(%esp),%eax
andl $15,%eax
jz .L058xts_enc_ret
movdqa %xmm1,%xmm5
movl %eax,112(%esp)
jmp .L059xts_enc_steal
.align 16
.L056xts_enc_done:
// Prepare the tweak for the stolen tail (one more GF doubling) if
// len was not block-aligned.
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz .L058xts_enc_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm5
paddq %xmm1,%xmm1
pand 96(%esp),%xmm5
pxor %xmm1,%xmm5
// Ciphertext stealing: swap the tail bytes with the end of the previous
// ciphertext block, then re-encrypt that block.
.L059xts_enc_steal:
movzbl (%esi),%ecx
movzbl -16(%edi),%edx
leal 1(%esi),%esi
movb %cl,-16(%edi)
movb %dl,(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz .L059xts_enc_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups -16(%edi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L060enc1_loop_10:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L060enc1_loop_10
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,-16(%edi)
// Exit: scrub xmm registers and tweak scratch, restore caller esp.
.L058xts_enc_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_xts_encrypt,.-.L_aes_hw_xts_encrypt_begin
.globl aes_hw_xts_decrypt
.hidden aes_hw_xts_decrypt
.type aes_hw_xts_decrypt,@function
.align 16
aes_hw_xts_decrypt:
.L_aes_hw_xts_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L061enc1_loop_11:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L061enc1_loop_11
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl %esp,%ebp
subl $120,%esp
andl $-16,%esp
xorl %ebx,%ebx
testl $15,%eax
setnz %bl
shll $4,%ebx
subl %ebx,%eax
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movl 240(%edx),%ecx
movl %edx,%ebp
movl %ecx,%ebx
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
subl $96,%eax
jc .L062xts_dec_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp .L063xts_dec_loop6
.align 16
.L063xts_dec_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,222,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,222,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
call .L_aesni_decrypt6_enter
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc .L063xts_dec_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
.L062xts_dec_short:
addl $96,%eax
jz .L064xts_dec_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb .L065xts_dec_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je .L066xts_dec_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb .L067xts_dec_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je .L068xts_dec_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call _aesni_decrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp .L069xts_dec_done
.align 16
.L065xts_dec_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L070dec1_loop_12:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L070dec1_loop_12
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp .L069xts_dec_done
.align 16
.L066xts_dec_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call _aesni_decrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L069xts_dec_done
.align 16
.L067xts_dec_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call _aesni_decrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp .L069xts_dec_done
.align 16
.L068xts_dec_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call _aesni_decrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L069xts_dec_done
.align 16
.L064xts_dec_done6x:
movl 112(%esp),%eax
andl $15,%eax
jz .L071xts_dec_ret
movl %eax,112(%esp)
jmp .L072xts_dec_only_one_more
.align 16
.L069xts_dec_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz .L071xts_dec_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
.L072xts_dec_only_one_more:
pshufd $19,%xmm0,%xmm5
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm5
pxor %xmm1,%xmm5
movl %ebp,%edx
movl %ebx,%ecx
movups (%esi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L073dec1_loop_13:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L073dec1_loop_13
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
.L074xts_dec_steal:
movzbl 16(%esi),%ecx
movzbl (%edi),%edx
leal 1(%esi),%esi
movb %cl,(%edi)
movb %dl,16(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz .L074xts_dec_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups (%edi),%xmm2
xorps %xmm6,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L075dec1_loop_14:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L075dec1_loop_14
.byte 102,15,56,223,209
xorps %xmm6,%xmm2
movups %xmm2,(%edi)
.L071xts_dec_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_xts_decrypt,.-.L_aes_hw_xts_decrypt_begin
//-----------------------------------------------------------------------
// void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
//                         const AES_KEY *key, uint8_t *ivec, int enc)
// AES-NI CBC mode, i386 cdecl: after the four register pushes the
// arguments live at 20..40(%esp).  enc != 0 -> encrypt (inherently
// serial, one block per iteration); enc == 0 -> decrypt (interleaves
// six blocks per iteration for throughput).  A 16-byte-aligned scratch
// frame is carved below the incoming stack; the caller's %esp is kept
// at 16(%esp) and restored at .L081cbc_ret.  xmm registers holding key
// or plaintext material are scrubbed before returning.
// NOTE(review): machine-generated file ("do not edit by hand") —
// comments only, every instruction byte is unchanged.
//-----------------------------------------------------------------------
.globl aes_hw_cbc_encrypt
.hidden aes_hw_cbc_encrypt
.type aes_hw_cbc_encrypt,@function
.align 16
aes_hw_cbc_encrypt:
.L_aes_hw_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi              // esi = in
movl %esp,%ebx
movl 24(%esp),%edi              // edi = out
subl $24,%ebx
movl 28(%esp),%eax              // eax = length in bytes
andl $-16,%ebx                  // 16-byte-align the scratch frame
movl 32(%esp),%edx              // edx = key schedule
movl 36(%esp),%ebp              // ebp = ivec
testl %eax,%eax
jz .L076cbc_abort               // length == 0: nothing to do
cmpl $0,40(%esp)                // enc flag: encrypt or decrypt?
xchgl %esp,%ebx                 // switch to aligned frame; ebx = old esp
movups (%ebp),%xmm7             // xmm7 = IV
movl 240(%edx),%ecx             // ecx = round-loop count from key schedule
movl %edx,%ebp                  // ebp = key pointer (saved across loop)
movl %ebx,16(%esp)              // remember caller's %esp
movl %ecx,%ebx                  // ebx = round-loop count (saved)
je .L077cbc_decrypt
// ---- CBC encryption: one block at a time (chaining is serial) ----
movaps %xmm7,%xmm2              // xmm2 = running chain value (IV/ciphertext)
cmpl $16,%eax
jb .L078cbc_enc_tail
subl $16,%eax
jmp .L079cbc_enc_loop
.align 16
.L079cbc_enc_loop:
movups (%esi),%xmm7             // next plaintext block
leal 16(%esi),%esi
movups (%edx),%xmm0             // round key 0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm7               // plaintext ^ round-key 0
leal 32(%edx),%edx
xorps %xmm7,%xmm2               // ... ^ previous ciphertext (CBC chain)
.L080enc1_loop_15:
.byte 102,15,56,220,209         // aesenc %xmm1,%xmm2
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L080enc1_loop_15
.byte 102,15,56,221,209         // aesenclast %xmm1,%xmm2
movl %ebx,%ecx                  // reload round-loop count
movl %ebp,%edx                  // reload key pointer
movups %xmm2,(%edi)             // store ciphertext block
leal 16(%edi),%edi
subl $16,%eax
jnc .L079cbc_enc_loop
addl $16,%eax
jnz .L078cbc_enc_tail
movaps %xmm2,%xmm7              // final ciphertext becomes the output IV
pxor %xmm2,%xmm2                // scrub
jmp .L081cbc_ret
.L078cbc_enc_tail:
// Partial final block: copy the remaining bytes to out, zero-pad the
// rest of the 16-byte block in place, then run the loop once more over
// the padded block at out.
movl %eax,%ecx
.long 2767451785                // mov %esi,%esi ; rep movsb (copy tail)
movl $16,%ecx
subl %eax,%ecx
xorl %eax,%eax
.long 2868115081                // mov %esi,%esi ; rep stosb (zero pad)
leal -16(%edi),%edi
movl %ebx,%ecx
movl %edi,%esi                  // re-read the padded block from out
movl %ebp,%edx
jmp .L079cbc_enc_loop
.align 16
// ---- CBC decryption: six blocks interleaved per iteration ----
.L077cbc_decrypt:
cmpl $80,%eax
jbe .L082cbc_dec_tail
movaps %xmm7,(%esp)             // stash chain value in the scratch frame
subl $80,%eax
jmp .L083cbc_dec_loop6_enter
.align 16
.L084cbc_dec_loop6:
movaps %xmm0,(%esp)             // last ciphertext becomes next chain value
movups %xmm7,(%edi)
leal 16(%edi),%edi
.L083cbc_dec_loop6_enter:
movdqu (%esi),%xmm2             // load six ciphertext blocks
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
call _aesni_decrypt6
movups (%esi),%xmm1             // re-load ciphertexts for the CBC xor
movups 16(%esi),%xmm0
xorps (%esp),%xmm2              // block0 ^= previous chain value
xorps %xmm1,%xmm3               // block(i) ^= ciphertext(i-1)
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm1
xorps %xmm0,%xmm6
movups 80(%esi),%xmm0
xorps %xmm1,%xmm7
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 96(%esi),%esi
movups %xmm4,32(%edi)
movl %ebx,%ecx                  // reload round-loop count
movups %xmm5,48(%edi)
movl %ebp,%edx                  // reload key pointer
movups %xmm6,64(%edi)
leal 80(%edi),%edi
subl $96,%eax
ja .L084cbc_dec_loop6
movaps %xmm7,%xmm2
movaps %xmm0,%xmm7              // xmm7 = last ciphertext (next IV)
addl $80,%eax
jle .L085cbc_dec_clear_tail_collected
movups %xmm2,(%edi)
leal 16(%edi),%edi
.L082cbc_dec_tail:
// 1..5 remaining whole blocks (plus a possible partial block in eax&15).
movups (%esi),%xmm2
movaps %xmm2,%xmm6              // keep ciphertexts for the final xor
cmpl $16,%eax
jbe .L086cbc_dec_one
movups 16(%esi),%xmm3
movaps %xmm3,%xmm5
cmpl $32,%eax
jbe .L087cbc_dec_two
movups 32(%esi),%xmm4
cmpl $48,%eax
jbe .L088cbc_dec_three
movups 48(%esi),%xmm5
cmpl $64,%eax
jbe .L089cbc_dec_four
movups 64(%esi),%xmm6
movaps %xmm7,(%esp)
movups (%esi),%xmm2
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps (%esp),%xmm2
xorps %xmm1,%xmm3
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm7           // last full ciphertext (next IV)
xorps %xmm0,%xmm6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3                // scrub as blocks are flushed
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
movups %xmm5,48(%edi)
pxor %xmm5,%xmm5
leal 64(%edi),%edi
movaps %xmm6,%xmm2
pxor %xmm6,%xmm6
subl $80,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L086cbc_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L091dec1_loop_16:
.byte 102,15,56,222,209         // aesdec %xmm1,%xmm2
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L091dec1_loop_16
.byte 102,15,56,223,209         // aesdeclast %xmm1,%xmm2
xorps %xmm7,%xmm2               // undo the CBC chain
movaps %xmm6,%xmm7              // ciphertext becomes next IV
subl $16,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L087cbc_dec_two:
call _aesni_decrypt2
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movaps %xmm3,%xmm2
pxor %xmm3,%xmm3
leal 16(%edi),%edi
movaps %xmm5,%xmm7
subl $32,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L088cbc_dec_three:
call _aesni_decrypt3
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
xorps %xmm5,%xmm4
movups %xmm2,(%edi)
movaps %xmm4,%xmm2
pxor %xmm4,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
leal 32(%edi),%edi
movups 32(%esi),%xmm7           // ciphertext becomes next IV
subl $48,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L089cbc_dec_four:
call _aesni_decrypt4
movups 16(%esi),%xmm1
movups 32(%esi),%xmm0
xorps %xmm7,%xmm2
movups 48(%esi),%xmm7           // ciphertext becomes next IV
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
leal 48(%edi),%edi
movaps %xmm5,%xmm2
pxor %xmm5,%xmm5
subl $64,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L085cbc_dec_clear_tail_collected:
pxor %xmm3,%xmm3                // scrub plaintext-bearing registers
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
.L090cbc_dec_tail_collected:
andl $15,%eax                   // bytes of a trailing partial block
jnz .L092cbc_dec_tail_partial
movups %xmm2,(%edi)
pxor %xmm0,%xmm0
jmp .L081cbc_ret
.align 16
.L092cbc_dec_tail_partial:
movaps %xmm2,(%esp)             // spill last plaintext block, then copy
pxor %xmm0,%xmm0                // the partial tail out byte-by-byte
movl $16,%ecx
movl %esp,%esi
subl %eax,%ecx                  // copy count derived from tail length
.long 2767451785                // mov %esi,%esi ; rep movsb
movdqa %xmm2,(%esp)             // overwrite the plaintext spill slot
.L081cbc_ret:
movl 16(%esp),%esp              // restore caller's stack
movl 36(%esp),%ebp              // ebp = ivec again
pxor %xmm2,%xmm2                // scrub remaining sensitive registers
pxor %xmm1,%xmm1
movups %xmm7,(%ebp)             // write back the updated IV
pxor %xmm7,%xmm7
.L076cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_cbc_encrypt,.-.L_aes_hw_cbc_encrypt_begin
//-----------------------------------------------------------------------
// Internal AES key-expansion helper (register-based convention):
//   eax = user key bytes, ecx = key size in bits (128/192/256),
//   edx = output AES_KEY.
// Returns eax = 0 on success, -1 for a NULL pointer, -2 for an
// unsupported key size.  For each key size there are two code paths:
// the AESKEYGENASSIST-based schedule, and an "_alt" schedule built from
// pshufb + aesenclast, selected from OPENSSL_ia32cap_P (presumably for
// CPUs where AESKEYGENASSIST is slow — TODO confirm the exact bits in
// the 0x10000800 mask below).  Clobbers xmm0-xmm5, ecx.
// NOTE(review): machine-generated — comments only, code bytes unchanged.
//-----------------------------------------------------------------------
.hidden _aesni_set_encrypt_key
.type _aesni_set_encrypt_key,@function
.align 16
_aesni_set_encrypt_key:
pushl %ebp
pushl %ebx
testl %eax,%eax                 // NULL user key?
jz .L093bad_pointer
testl %edx,%edx                 // NULL output key?
jz .L093bad_pointer
call .L094pic
.L094pic:
popl %ebx                       // ebx = PIC base
leal .Lkey_const-.L094pic(%ebx),%ebx
leal OPENSSL_ia32cap_P-.Lkey_const(%ebx),%ebp
movups (%eax),%xmm0             // xmm0 = first 16 key bytes
xorps %xmm4,%xmm4               // xmm4 = 0 (scratch for shufps chains)
movl 4(%ebp),%ebp               // second capability word
leal 16(%edx),%edx
andl $268437504,%ebp            // mask 0x10000800: alt-path feature bits
cmpl $256,%ecx
je .L09514rounds
cmpl $192,%ecx
je .L09612rounds
cmpl $128,%ecx
jne .L097bad_keybits
.align 16
.L09810rounds:                  // ---- AES-128 ----
cmpl $268435456,%ebp            // 0x10000000 alone -> alt schedule
je .L09910rounds_alt
movl $9,%ecx
movups %xmm0,-16(%edx)          // round key 0 = raw key
.byte 102,15,58,223,200,1       // aeskeygenassist $1,%xmm0,%xmm1
call .L100key_128_cold
.byte 102,15,58,223,200,2       // aeskeygenassist $2,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,4       // aeskeygenassist $4,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,8       // aeskeygenassist $8,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,16      // aeskeygenassist $16,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,32      // aeskeygenassist $32,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,64      // aeskeygenassist $64,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,128     // aeskeygenassist $128,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,27      // aeskeygenassist $27,%xmm0,%xmm1
call .L101key_128
.byte 102,15,58,223,200,54      // aeskeygenassist $54,%xmm0,%xmm1
call .L101key_128
movups %xmm0,(%edx)             // final round key
movl %ecx,80(%edx)              // round-count field (this file's convention)
jmp .L102good_key
.align 16
.L101key_128:
movups %xmm0,(%edx)             // emit previous round key, advance
leal 16(%edx),%edx
.L100key_128_cold:
// Fold the keygenassist output into the sliding-xor of the previous
// round key (standard AES-128 expansion step done with shufps/xorps).
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1         // broadcast the rotated/sub-ed word
xorps %xmm1,%xmm0
ret
.align 16
.L09910rounds_alt:              // AES-128 without AESKEYGENASSIST
movdqa (%ebx),%xmm5             // pshufb byte-rotation mask (.Lkey_const)
movl $8,%ecx
movdqa 32(%ebx),%xmm4           // rcon = {1,1,1,1}
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)
.L103loop_key128:
.byte 102,15,56,0,197           // pshufb %xmm5,%xmm0 (RotWord)
.byte 102,15,56,221,196         // aesenclast %xmm4,%xmm0 (SubWord + rcon)
pslld $1,%xmm4                  // next rcon
leal 16(%edx),%edx
movdqa %xmm2,%xmm3              // prefix-xor of the previous round key
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz .L103loop_key128
movdqa 48(%ebx),%xmm4           // rcon = {27,...} for round 9
.byte 102,15,56,0,197           // pshufb %xmm5,%xmm0
.byte 102,15,56,221,196         // aesenclast %xmm4,%xmm0
pslld $1,%xmm4                  // rcon 27 -> 54 for the final round
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197           // pshufb %xmm5,%xmm0
.byte 102,15,56,221,196         // aesenclast %xmm4,%xmm0
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)              // round-count field
jmp .L102good_key
.align 16
.L09612rounds:                  // ---- AES-192 ----
movq 16(%eax),%xmm2             // remaining 8 key bytes
cmpl $268435456,%ebp
je .L10412rounds_alt
movl $11,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,202,1       // aeskeygenassist $1,%xmm2,%xmm1
call .L105key_192a_cold
.byte 102,15,58,223,202,2       // aeskeygenassist $2,%xmm2,%xmm1
call .L106key_192b
.byte 102,15,58,223,202,4       // aeskeygenassist $4,%xmm2,%xmm1
call .L107key_192a
.byte 102,15,58,223,202,8       // aeskeygenassist $8,%xmm2,%xmm1
call .L106key_192b
.byte 102,15,58,223,202,16      // aeskeygenassist $16,%xmm2,%xmm1
call .L107key_192a
.byte 102,15,58,223,202,32      // aeskeygenassist $32,%xmm2,%xmm1
call .L106key_192b
.byte 102,15,58,223,202,64      // aeskeygenassist $64,%xmm2,%xmm1
call .L107key_192a
.byte 102,15,58,223,202,128     // aeskeygenassist $128,%xmm2,%xmm1
call .L106key_192b
movups %xmm0,(%edx)
movl %ecx,48(%edx)              // round-count field
jmp .L102good_key
.align 16
.L107key_192a:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.align 16
.L105key_192a_cold:
movaps %xmm2,%xmm5
.L108key_192b_warm:
// 192-bit expansion step: update both the 128-bit (xmm0) and the
// 64-bit overhang (xmm2) halves of the schedule word stream.
shufps $16,%xmm0,%xmm4
movdqa %xmm2,%xmm3
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
pslldq $4,%xmm3
xorps %xmm4,%xmm0
pshufd $85,%xmm1,%xmm1          // broadcast keygenassist word
pxor %xmm3,%xmm2
pxor %xmm1,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm3,%xmm2
ret
.align 16
.L106key_192b:
movaps %xmm0,%xmm3              // repack the 192-bit stream into two
shufps $68,%xmm0,%xmm5          // 128-bit round-key slots
movups %xmm5,(%edx)
shufps $78,%xmm2,%xmm3
movups %xmm3,16(%edx)
leal 32(%edx),%edx
jmp .L108key_192b_warm
.align 16
.L10412rounds_alt:              // AES-192 without AESKEYGENASSIST
movdqa 16(%ebx),%xmm5           // pshufb mask for the 192-bit schedule
movdqa 32(%ebx),%xmm4           // rcon = {1,1,1,1}
movl $8,%ecx
movdqu %xmm0,-16(%edx)
.L109loop_key192:
movq %xmm2,(%edx)
movdqa %xmm2,%xmm1
.byte 102,15,56,0,213           // pshufb %xmm5,%xmm2
.byte 102,15,56,221,212         // aesenclast %xmm4,%xmm2
pslld $1,%xmm4                  // next rcon
leal 24(%edx),%edx              // 192-bit stride
movdqa %xmm0,%xmm3              // prefix-xor of previous 128 bits
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pxor %xmm2,%xmm0
pxor %xmm3,%xmm2
movdqu %xmm0,-16(%edx)
decl %ecx
jnz .L109loop_key192
movl $11,%ecx
movl %ecx,32(%edx)              // round-count field
jmp .L102good_key
.align 16
.L09514rounds:                  // ---- AES-256 ----
movups 16(%eax),%xmm2           // second 16 key bytes
leal 16(%edx),%edx
cmpl $268435456,%ebp
je .L11014rounds_alt
movl $13,%ecx
movups %xmm0,-32(%edx)          // round keys 0 and 1 = raw key halves
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1       // aeskeygenassist $1,%xmm2,%xmm1
call .L111key_256a_cold
.byte 102,15,58,223,200,1       // aeskeygenassist $1,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,2       // aeskeygenassist $2,%xmm2,%xmm1
call .L113key_256a
.byte 102,15,58,223,200,2       // aeskeygenassist $2,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,4       // aeskeygenassist $4,%xmm2,%xmm1
call .L113key_256a
.byte 102,15,58,223,200,4       // aeskeygenassist $4,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,8       // aeskeygenassist $8,%xmm2,%xmm1
call .L113key_256a
.byte 102,15,58,223,200,8       // aeskeygenassist $8,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,16      // aeskeygenassist $16,%xmm2,%xmm1
call .L113key_256a
.byte 102,15,58,223,200,16      // aeskeygenassist $16,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,32      // aeskeygenassist $32,%xmm2,%xmm1
call .L113key_256a
.byte 102,15,58,223,200,32      // aeskeygenassist $32,%xmm0,%xmm1
call .L112key_256b
.byte 102,15,58,223,202,64      // aeskeygenassist $64,%xmm2,%xmm1
call .L113key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)              // round-count field
xorl %eax,%eax
jmp .L102good_key
.align 16
.L113key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
.L111key_256a_cold:
// Even step: update xmm0 from keygenassist of xmm2 (word rotated).
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L112key_256b:
// Odd step: update xmm2 from keygenassist of xmm0 (no rotation).
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.align 16
.L11014rounds_alt:              // AES-256 without AESKEYGENASSIST
movdqa (%ebx),%xmm5             // pshufb byte-rotation mask
movdqa 32(%ebx),%xmm4           // rcon = {1,1,1,1}
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
.L114loop_key256:
.byte 102,15,56,0,213           // pshufb %xmm5,%xmm2 (RotWord)
.byte 102,15,56,221,212         // aesenclast %xmm4,%xmm2 (SubWord + rcon)
movdqa %xmm0,%xmm3              // prefix-xor of previous even key
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4                  // next rcon
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
decl %ecx
jz .L115done_key256
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211         // aesenclast %xmm3,%xmm2 (SubWord, rcon=0)
movdqa %xmm1,%xmm3              // prefix-xor of previous odd key
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp .L114loop_key256
.L115done_key256:
movl $13,%ecx
movl %ecx,16(%edx)              // round-count field
.L102good_key:
pxor %xmm0,%xmm0                // scrub all key material from registers
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax                  // success
popl %ebx
popl %ebp
ret
.align 4
.L093bad_pointer:
movl $-1,%eax                   // NULL input/output pointer
popl %ebx
popl %ebp
ret
.align 4
.L097bad_keybits:
pxor %xmm0,%xmm0                // scrub the loaded key bytes
movl $-2,%eax                   // unsupported key size
popl %ebx
popl %ebp
ret
.size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key
//-----------------------------------------------------------------------
// int aes_hw_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key)
// cdecl wrapper: marshals the stack arguments into the register-based
// convention of _aesni_set_encrypt_key (eax/ecx/edx) and tail-returns
// its result (0 ok, -1 bad pointer, -2 bad key size).  The DISPATCH_TEST
// block records that the AES-NI path was taken, for BoringSSL's
// dispatch-coverage testing.
//-----------------------------------------------------------------------
.globl aes_hw_set_encrypt_key
.hidden aes_hw_set_encrypt_key
.type aes_hw_set_encrypt_key,@function
.align 16
aes_hw_set_encrypt_key:
.L_aes_hw_set_encrypt_key_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L116pic
.L116pic:
popl %ebx                       // ebx = PIC base
leal BORINGSSL_function_hit+3-.L116pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)                 // BORINGSSL_function_hit[3] = 1
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax               // eax = userKey
movl 8(%esp),%ecx               // ecx = bits
movl 12(%esp),%edx              // edx = key
call _aesni_set_encrypt_key
ret
.size aes_hw_set_encrypt_key,.-.L_aes_hw_set_encrypt_key_begin
//-----------------------------------------------------------------------
// int aes_hw_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key)
// Builds the encryption schedule, then converts it in place for the
// Equivalent Inverse Cipher: the round keys are reversed end-to-end and
// AESIMC (InvMixColumns) is applied to every key except the two
// outermost ones.  Returns _aesni_set_encrypt_key's error code on
// failure, 0 on success.
//-----------------------------------------------------------------------
.globl aes_hw_set_decrypt_key
.hidden aes_hw_set_decrypt_key
.type aes_hw_set_decrypt_key,@function
.align 16
aes_hw_set_decrypt_key:
.L_aes_hw_set_decrypt_key_begin:
movl 4(%esp),%eax               // eax = userKey
movl 8(%esp),%ecx               // ecx = bits
movl 12(%esp),%edx              // edx = key
call _aesni_set_encrypt_key     // leaves the round count in %ecx
movl 12(%esp),%edx
shll $4,%ecx                    // ecx = 16 * round-count
testl %eax,%eax
jnz .L117dec_key_ret            // propagate expansion error
leal 16(%edx,%ecx,1),%eax       // eax -> last round key
movups (%edx),%xmm0             // swap first and last round keys
movups (%eax),%xmm1             // (no aesimc on the outermost pair)
movups %xmm0,(%eax)
movups %xmm1,(%edx)
leal 16(%edx),%edx
leal -16(%eax),%eax
.L118dec_key_inverse:
// Walk inwards from both ends, swapping and InvMixColumns-ing.
movups (%edx),%xmm0
movups (%eax),%xmm1
.byte 102,15,56,219,192         // aesimc %xmm0,%xmm0
.byte 102,15,56,219,201         // aesimc %xmm1,%xmm1
leal 16(%edx),%edx
leal -16(%eax),%eax
movups %xmm0,16(%eax)
movups %xmm1,-16(%edx)
cmpl %edx,%eax
ja .L118dec_key_inverse
movups (%edx),%xmm0             // middle key: aesimc in place
.byte 102,15,56,219,192         // aesimc %xmm0,%xmm0
movups %xmm0,(%edx)
pxor %xmm0,%xmm0                // scrub key material
pxor %xmm1,%xmm1
xorl %eax,%eax                  // success
.L117dec_key_ret:
ret
.size aes_hw_set_decrypt_key,.-.L_aes_hw_set_decrypt_key_begin
// Constants for the "_alt" (no-AESKEYGENASSIST) key schedules:
//   +0  : 0x0c0f0e0d x4 — pshufb mask, rotate-and-shuffle for 128/256-bit
//   +16 : 0x04070605 x4 — pshufb mask for the 192-bit schedule
//   +32 : rcon seed {1,1,1,1}  (doubled each round via pslld $1)
//   +48 : rcon {27,...} for the tail of the AES-128 schedule
.align 64
.Lkey_const:
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
// ASCII: "AES for Intel AES-NI, CRYPTOGAMS by <appro@openssl.org>\0"
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 8,981
|
deps/boringssl/linux-x86/crypto/fipsmodule/x86-mont-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
//                 const BN_ULONG *np, const BN_ULONG *n0, int num)
// Montgomery multiplication rp = ap * bp / R mod np (R = 2^(32*num)),
// i386 cdecl; argument layout at 20..40(%esp) inferred from the copies
// below — TODO confirm against the C prototype.  Returns 0 (and does
// nothing) when num < 4, otherwise 1.
// Two implementations: an SSE2/MMX path using pmuludq (selected via
// bit 26 of OPENSSL_ia32cap_P) and a plain integer mull path, which
// itself has a dedicated squaring variant for ap == bp.
// Frame layout after setup: 4(%esp)=rp 8=ap 12=bp 16=np 20=n0
// 24=saved %esp, tp[] (the num+2-word temporary) starts at 32(%esp).
// NOTE(review): machine-generated — comments only, code bytes unchanged.
//-----------------------------------------------------------------------
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax                  // default return value: 0 (failure)
movl 40(%esp),%edi              // edi = num
cmpl $4,%edi
jl .L000just_leave              // refuse num < 4
leal 20(%esp),%esi              // esi -> argument block
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp      // reserve 4*(num+2)+32 bytes of scratch
negl %edi
// Nudge the frame so its 2 KB-relative position vs. the caller frame is
// fixed, then 64-byte-align it (presumably cache-aliasing tuning —
// TODO confirm against the perlasm source).
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L001page_walk
jmp .L002page_walk_done
.align 16
.L001page_walk:                 // stack probe: touch every page down to ebp
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L001page_walk
.L002page_walk_done:
movl (%esi),%eax                // copy the arguments into the new frame
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi                // esi = *n0
movl %eax,4(%esp)               // 4(%esp)  = rp
movl %ebx,8(%esp)               // 8(%esp)  = ap
movl %ecx,12(%esp)              // 12(%esp) = bp
movl %ebp,16(%esp)              // 16(%esp) = np
movl %esi,20(%esp)              // 20(%esp) = n0
leal -3(%edi),%ebx              // ebx = num-1 (edi held num+2)
movl %edx,24(%esp)              // 24(%esp) = caller's %esp
call .L003PIC_me_up
.L003PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L003PIC_me_up(%eax),%eax
btl $26,(%eax)                  // SSE2 available?
jnc .L004non_sse2
// ---- SSE2/MMX path: 32x32->64 pmuludq with explicit carry folding ----
movl $-1,%eax
movd %eax,%mm7                  // mm7 = low-32-bit mask
movl 8(%esp),%esi               // esi = ap
movl 12(%esp),%edi              // edi = bp
movl 16(%esp),%ebp              // ebp = np
xorl %edx,%edx                  // edx = outer index i
xorl %ecx,%ecx                  // ecx = inner index j
movd (%edi),%mm4                // mm4 = bp[0]
movd (%esi),%mm5                // mm5 = ap[0]
movd (%ebp),%mm3                // mm3 = np[0]
pmuludq %mm4,%mm5               // ap[0]*bp[0]
movq %mm5,%mm2                  // mm2 = "ap" carry chain
movq %mm5,%mm0
pand %mm7,%mm0                  // low word
pmuludq 20(%esp),%mm5           // mm5 = m = low*n0 mod 2^32
pmuludq %mm5,%mm3               // np[0]*m
paddq %mm0,%mm3                 // mm3 = "np" carry chain
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 16
.L0051st:                       // first outer pass: tp[] = ap[]*bp[0]+np[]*m
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)       // emit tp[j-1]
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0051st
pmuludq %mm4,%mm0               // last limb of the first pass
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)       // tp[num-1..num]: merged top carries
incl %edx
.L006outer:                     // passes i = 1..num-1
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4         // mm4 = bp[i]
movd (%esi),%mm5                // mm5 = ap[0]
movd 32(%esp),%mm6              // mm6 = tp[0]
movd (%ebp),%mm3                // mm3 = np[0]
pmuludq %mm4,%mm5
paddq %mm6,%mm5                 // ap[0]*bp[i] + tp[0]
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5           // m = low*n0 mod 2^32
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6              // tp[1]
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L007inner:                     // tp[] = tp[] + ap[]*bp[i] + np[]*m
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L007inner
movl %ecx,%ebx                  // restore ebx = num-1
pmuludq %mm4,%mm0               // last limb of this pass
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6       // previous top carry word
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L006outer
emms                            // leave MMX state before integer tail
jmp .L008common_tail
.align 16
// ---- Integer path (no SSE2): classic mull/adcl schoolbook ----
.L004non_sse2:
movl 8(%esp),%esi               // esi = ap
leal 1(%ebx),%ebp
movl 12(%esp),%edi              // edi = bp
xorl %ecx,%ecx
movl %esi,%edx
andl $1,%ebp
subl %edi,%edx
leal 4(%edi,%ebx,4),%eax        // eax = &bp[num]
orl %edx,%ebp                   // zero iff ap==bp and num is even
movl (%edi),%edi                // edi = bp[0]
jz .L009bn_sqr_mont             // squaring fast path for ap == bp
movl %eax,28(%esp)              // 28(%esp) = end-of-bp sentinel
movl (%esi),%eax
xorl %edx,%edx
.align 16
.L010mull:                      // tp[] = ap[] * bp[0]
movl %edx,%ebp
mull %edi
addl %eax,%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
movl (%esi,%ecx,4),%eax
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl .L010mull
movl %edx,%ebp                  // last limb + set up reduction
mull %edi
movl 20(%esp),%edi              // edi = n0
addl %ebp,%eax
movl 16(%esp),%esi              // esi = np
adcl $0,%edx
imull 32(%esp),%edi             // m = tp[0]*n0 mod 2^32
movl %eax,32(%esp,%ebx,4)
xorl %ecx,%ecx
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)       // clear top carry word
movl (%esi),%eax
mull %edi                       // np[0]*m; low word annihilates tp[0]
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
incl %ecx
jmp .L0112ndmadd
.align 16
.L0121stmadd:                   // tp[] += ap[] * bp[i]
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl .L0121stmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%eax
movl 20(%esp),%edi              // edi = n0
adcl $0,%edx
movl 16(%esp),%esi              // esi = np
addl %eax,%ebp
adcl $0,%edx
imull 32(%esp),%edi             // m = tp[0]*n0 mod 2^32
xorl %ecx,%ecx
addl 36(%esp,%ebx,4),%edx
movl %ebp,32(%esp,%ebx,4)
adcl $0,%ecx
movl (%esi),%eax
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
mull %edi                       // np[0]*m
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
movl $1,%ecx
.align 16
.L0112ndmadd:                   // tp[] = (tp[] + np[]*m) >> 32
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)       // note: stored shifted down one word
jl .L0112ndmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
xorl %eax,%eax
movl 12(%esp),%ecx              // ecx = current bp position
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
leal 4(%ecx),%ecx               // advance to bp[i+1]
movl %edx,32(%esp,%ebx,4)
cmpl 28(%esp),%ecx              // reached &bp[num]?
movl %eax,36(%esp,%ebx,4)
je .L008common_tail
movl (%ecx),%edi                // edi = bp[i+1]
movl 8(%esp),%esi               // esi = ap
movl %ecx,12(%esp)
xorl %ecx,%ecx
xorl %edx,%edx
movl (%esi),%eax
jmp .L0121stmadd
.align 16
.L009bn_sqr_mont:               // ---- squaring variant (ap == bp) ----
movl %ebx,(%esp)                // (%esp) = num-1
movl %ecx,12(%esp)              // 12(%esp) = outer index i (= 0)
movl %edi,%eax                  // edi = ap[0]
mull %edi                       // ap[0]^2
movl %eax,32(%esp)
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx                    // ebx carries the doubled-product bit
incl %ecx
.align 16
.L013sqr:                       // tp[] = 2*ap[1..]*ap[0] (+ squares later)
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal 1(%ecx),%ecx
adcl $0,%edx
leal (%ebx,%eax,2),%ebp         // double, folding in previous high bit
shrl $31,%eax
cmpl (%esp),%ecx
movl %eax,%ebx
movl %ebp,28(%esp,%ecx,4)
jl .L013sqr
movl (%esi,%ecx,4),%eax         // last limb + first reduction
movl %edx,%ebp
mull %edi
addl %ebp,%eax
movl 20(%esp),%edi              // edi = n0
adcl $0,%edx
movl 16(%esp),%esi              // esi = np
leal (%ebx,%eax,2),%ebp
imull 32(%esp),%edi             // m = tp[0]*n0 mod 2^32
shrl $31,%eax
movl %ebp,32(%esp,%ecx,4)
leal (%eax,%edx,2),%ebp
movl (%esi),%eax
shrl $31,%edx
movl %ebp,36(%esp,%ecx,4)
movl %edx,40(%esp,%ecx,4)
mull %edi                       // np[0]*m
addl 32(%esp),%eax
movl %ecx,%ebx
adcl $0,%edx
movl 4(%esi),%eax
movl $1,%ecx
.align 16
.L0143rdmadd:                   // reduction pass, two limbs per iteration
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
movl 4(%esi,%ecx,4),%eax
adcl $0,%edx
movl %ebp,28(%esp,%ecx,4)
movl %edx,%ebp
mull %edi
addl 36(%esp,%ecx,4),%ebp
leal 2(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl .L0143rdmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
movl 12(%esp),%ecx              // ecx = outer index i
xorl %eax,%eax
movl 8(%esp),%esi               // esi = ap
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
movl %edx,32(%esp,%ebx,4)
cmpl %ebx,%ecx
movl %eax,36(%esp,%ebx,4)
je .L008common_tail
movl 4(%esi,%ecx,4),%edi        // edi = ap[i+1]
leal 1(%ecx),%ecx
movl %edi,%eax
movl %ecx,12(%esp)
mull %edi                       // square term ap[i+1]^2
addl 32(%esp,%ecx,4),%eax
adcl $0,%edx
movl %eax,32(%esp,%ecx,4)
xorl %ebp,%ebp
cmpl %ebx,%ecx
leal 1(%ecx),%ecx
je .L015sqrlast
movl %edx,%ebx                  // carry the doubled-product bit again
shrl $1,%edx
andl $1,%ebx
.align 16
.L016sqradd:                    // tp[] += 2*ap[j]*ap[i]
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal (%eax,%eax,1),%ebp
adcl $0,%edx
shrl $31,%eax
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%eax
addl %ebx,%ebp
adcl $0,%eax
cmpl (%esp),%ecx
movl %ebp,28(%esp,%ecx,4)
movl %eax,%ebx
jle .L016sqradd
movl %edx,%ebp
addl %edx,%edx
shrl $31,%ebp
addl %ebx,%edx
adcl $0,%ebp
.L015sqrlast:
movl 20(%esp),%edi              // edi = n0
movl 16(%esp),%esi              // esi = np
imull 32(%esp),%edi             // m = tp[0]*n0 mod 2^32
addl 32(%esp,%ecx,4),%edx
movl (%esi),%eax
adcl $0,%ebp
movl %edx,32(%esp,%ecx,4)
movl %ebp,36(%esp,%ecx,4)
mull %edi                       // np[0]*m
addl 32(%esp),%eax
leal -1(%ecx),%ebx
adcl $0,%edx
movl $1,%ecx
movl 4(%esi),%eax
jmp .L0143rdmadd
.align 16
.L008common_tail:
// Final conditional subtraction: rp = tp - np, then a branch-free
// masked select between tp and tp-np based on the borrow/carry word.
movl 16(%esp),%ebp              // ebp = np
movl 4(%esp),%edi               // edi = rp
leal 32(%esp),%esi              // esi = tp
movl (%esi),%eax
movl %ebx,%ecx                  // ecx = num-1
xorl %edx,%edx                  // index; also clears CF for the sbb chain
.align 16
.L017sub:                       // rp[] = tp[] - np[] (borrow in CF)
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L017sub
sbbl $0,%eax                    // fold final borrow into the carry word
movl $-1,%edx
xorl %eax,%edx                  // eax/edx = complementary select masks
jmp .L018copy
.align 16
.L018copy:                      // rp[i] = (tp[i]&eax) | (rp[i]&edx)
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)       // zap tp as we go
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L018copy
movl 24(%esp),%esp              // restore caller's stack
movl $1,%eax                    // success
.L000just_leave:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
// ASCII: "Montgomery Multiplication for x86, CRYPTOGAMS by <appro@openssl.org>\0"
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 67,825
|
deps/boringssl/linux-x86/crypto/fipsmodule/sha1-586-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha1_block_data_order
.hidden sha1_block_data_order
.type sha1_block_data_order,@function
.align 16
sha1_block_data_order:
.L_sha1_block_data_order_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .L000pic_point
.L000pic_point:
popl %ebp
leal OPENSSL_ia32cap_P-.L000pic_point(%ebp),%esi
leal .LK_XX_XX-.L000pic_point(%ebp),%ebp
movl (%esi),%eax
movl 4(%esi),%edx
testl $512,%edx
jz .L001x86
movl 8(%esi),%ecx
testl $16777216,%eax
jz .L001x86
andl $268435456,%edx
andl $1073741824,%eax
orl %edx,%eax
cmpl $1342177280,%eax
je .Lavx_shortcut
jmp .Lssse3_shortcut
.align 16
.L001x86:
movl 20(%esp),%ebp
movl 24(%esp),%esi
movl 28(%esp),%eax
subl $76,%esp
shll $6,%eax
addl %esi,%eax
movl %eax,104(%esp)
movl 16(%ebp),%edi
jmp .L002loop
.align 16
.L002loop:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,(%esp)
movl %ebx,4(%esp)
movl %ecx,8(%esp)
movl %edx,12(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,16(%esp)
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %edx,28(%esp)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,40(%esp)
movl %edx,44(%esp)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,48(%esp)
movl %ebx,52(%esp)
movl %ecx,56(%esp)
movl %edx,60(%esp)
movl %esi,100(%esp)
movl (%ebp),%eax
movl 4(%ebp),%ebx
movl 8(%ebp),%ecx
movl 12(%ebp),%edx
movl %ecx,%esi
movl %eax,%ebp
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl (%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 4(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 8(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 12(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 16(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 20(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 24(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 28(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 32(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 36(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 40(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 44(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 48(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 52(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 56(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 60(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
movl (%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 8(%esp),%ebx
xorl %esi,%ebp
xorl 32(%esp),%ebx
andl %edx,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
xorl %esi,%ebp
addl %ebp,%eax
movl %ecx,%ebp
rorl $2,%edx
movl %ebx,(%esp)
roll $5,%ebp
leal 1518500249(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 12(%esp),%eax
xorl %edi,%ebp
xorl 36(%esp),%eax
andl %ecx,%ebp
xorl 56(%esp),%eax
roll $1,%eax
xorl %edi,%ebp
addl %ebp,%esi
movl %ebx,%ebp
rorl $2,%ecx
movl %eax,4(%esp)
roll $5,%ebp
leal 1518500249(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 40(%esp),%esi
andl %ebx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
xorl %edx,%ebp
addl %ebp,%edi
movl %eax,%ebp
rorl $2,%ebx
movl %esi,8(%esp)
roll $5,%ebp
leal 1518500249(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 44(%esp),%edi
andl %eax,%ebp
xorl (%esp),%edi
roll $1,%edi
xorl %ecx,%ebp
addl %ebp,%edx
movl %esi,%ebp
rorl $2,%eax
movl %edi,12(%esp)
roll $5,%ebp
leal 1518500249(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,52(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,56(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,60(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl (%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 8(%esp),%edx
xorl %eax,%ebp
xorl 32(%esp),%edx
xorl %ebx,%ebp
xorl 52(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 4(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 12(%esp),%ecx
xorl %esi,%ebp
xorl 36(%esp),%ecx
xorl %eax,%ebp
xorl 56(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,4(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 8(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 16(%esp),%ebx
xorl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl 60(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,8(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 12(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 20(%esp),%eax
xorl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl (%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,12(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 16(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 24(%esp),%esi
xorl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 4(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,16(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 20(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 28(%esp),%edi
xorl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 8(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,20(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 24(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 32(%esp),%edx
xorl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 12(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,24(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 28(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 36(%esp),%ecx
xorl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 16(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,28(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 32(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl (%esp),%ebx
andl %edx,%ebp
xorl 20(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,32(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 36(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl 4(%esp),%eax
andl %ecx,%ebp
xorl 24(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,36(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 40(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 8(%esp),%esi
andl %ebx,%ebp
xorl 28(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,40(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 44(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 12(%esp),%edi
andl %eax,%ebp
xorl 32(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,44(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 48(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 16(%esp),%edx
andl %esi,%ebp
xorl 36(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,48(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 52(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 20(%esp),%ecx
andl %edi,%ebp
xorl 40(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,52(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 56(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl (%esp),%ebx
xorl %esi,%ebp
xorl 24(%esp),%ebx
andl %edx,%ebp
xorl 44(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,56(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 60(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 4(%esp),%eax
xorl %edi,%ebp
xorl 28(%esp),%eax
andl %ecx,%ebp
xorl 48(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,60(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl (%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 8(%esp),%esi
xorl %edx,%ebp
xorl 32(%esp),%esi
andl %ebx,%ebp
xorl 52(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 4(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 12(%esp),%edi
xorl %ecx,%ebp
xorl 36(%esp),%edi
andl %eax,%ebp
xorl 56(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,4(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 8(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 16(%esp),%edx
xorl %ebx,%ebp
xorl 40(%esp),%edx
andl %esi,%ebp
xorl 60(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,8(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 12(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 20(%esp),%ecx
xorl %eax,%ebp
xorl 44(%esp),%ecx
andl %edi,%ebp
xorl (%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,12(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 16(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 24(%esp),%ebx
xorl %esi,%ebp
xorl 48(%esp),%ebx
andl %edx,%ebp
xorl 4(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,16(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 20(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 28(%esp),%eax
xorl %edi,%ebp
xorl 52(%esp),%eax
andl %ecx,%ebp
xorl 8(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,20(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 24(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 32(%esp),%esi
xorl %edx,%ebp
xorl 56(%esp),%esi
andl %ebx,%ebp
xorl 12(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,24(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 28(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 36(%esp),%edi
xorl %ecx,%ebp
xorl 60(%esp),%edi
andl %eax,%ebp
xorl 16(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,28(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 32(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 40(%esp),%edx
xorl %ebx,%ebp
xorl (%esp),%edx
andl %esi,%ebp
xorl 20(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,32(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 36(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 44(%esp),%ecx
xorl %eax,%ebp
xorl 4(%esp),%ecx
andl %edi,%ebp
xorl 24(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,36(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 40(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 48(%esp),%ebx
xorl %esi,%ebp
xorl 8(%esp),%ebx
andl %edx,%ebp
xorl 28(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,40(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 44(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 52(%esp),%eax
xorl %edi,%ebp
xorl 12(%esp),%eax
andl %ecx,%ebp
xorl 32(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,44(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 48(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 56(%esp),%esi
xorl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 36(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,48(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 52(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 60(%esp),%edi
xorl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 40(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,52(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 56(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl (%esp),%edx
xorl %eax,%ebp
xorl 24(%esp),%edx
xorl %ebx,%ebp
xorl 44(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,56(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 60(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 4(%esp),%ecx
xorl %esi,%ebp
xorl 28(%esp),%ecx
xorl %eax,%ebp
xorl 48(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,60(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl (%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 8(%esp),%ebx
xorl %edi,%ebp
xorl 32(%esp),%ebx
xorl %esi,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 12(%esp),%eax
xorl %edx,%ebp
xorl 36(%esp),%eax
xorl %edi,%ebp
xorl 56(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,4(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 16(%esp),%esi
xorl %ecx,%ebp
xorl 40(%esp),%esi
xorl %edx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,8(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 20(%esp),%edi
xorl %ebx,%ebp
xorl 44(%esp),%edi
xorl %ecx,%ebp
xorl (%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,12(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
leal 3395469782(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
leal 3395469782(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
leal 3395469782(%edi,%edx,1),%edi
addl %ebp,%edi
movl 96(%esp),%ebp
movl 100(%esp),%edx
addl (%ebp),%edi
addl 4(%ebp),%esi
addl 8(%ebp),%eax
addl 12(%ebp),%ebx
addl 16(%ebp),%ecx
movl %edi,(%ebp)
addl $64,%edx
movl %esi,4(%ebp)
cmpl 104(%esp),%edx
movl %eax,8(%ebp)
movl %ecx,%edi
movl %ebx,12(%ebp)
movl %edx,%esi
movl %ecx,16(%ebp)
jb .L002loop
addl $76,%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha1_block_data_order,.-.L_sha1_block_data_order_begin
.hidden _sha1_block_data_order_ssse3
.type _sha1_block_data_order_ssse3,@function
.align 16
_sha1_block_data_order_ssse3:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .L003pic_point
.L003pic_point:
popl %ebp
leal .LK_XX_XX-.L003pic_point(%ebp),%ebp
.Lssse3_shortcut:
movdqa (%ebp),%xmm7
movdqa 16(%ebp),%xmm0
movdqa 32(%ebp),%xmm1
movdqa 48(%ebp),%xmm2
movdqa 64(%ebp),%xmm6
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
subl $208,%esp
andl $-64,%esp
movdqa %xmm0,112(%esp)
movdqa %xmm1,128(%esp)
movdqa %xmm2,144(%esp)
shll $6,%edx
movdqa %xmm7,160(%esp)
addl %ebp,%edx
movdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
movdqu -64(%ebp),%xmm0
movdqu -48(%ebp),%xmm1
movdqu -32(%ebp),%xmm2
movdqu -16(%ebp),%xmm3
.byte 102,15,56,0,198
.byte 102,15,56,0,206
.byte 102,15,56,0,214
movdqa %xmm7,96(%esp)
.byte 102,15,56,0,222
paddd %xmm7,%xmm0
paddd %xmm7,%xmm1
paddd %xmm7,%xmm2
movdqa %xmm0,(%esp)
psubd %xmm7,%xmm0
movdqa %xmm1,16(%esp)
psubd %xmm7,%xmm1
movdqa %xmm2,32(%esp)
movl %ecx,%ebp
psubd %xmm7,%xmm2
xorl %edx,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebp,%esi
jmp .L004loop
.align 16
.L004loop:
rorl $2,%ebx
xorl %edx,%esi
movl %eax,%ebp
punpcklqdq %xmm1,%xmm4
movdqa %xmm3,%xmm6
addl (%esp),%edi
xorl %ecx,%ebx
paddd %xmm3,%xmm7
movdqa %xmm0,64(%esp)
roll $5,%eax
addl %esi,%edi
psrldq $4,%xmm6
andl %ebx,%ebp
xorl %ecx,%ebx
pxor %xmm0,%xmm4
addl %eax,%edi
rorl $7,%eax
pxor %xmm2,%xmm6
xorl %ecx,%ebp
movl %edi,%esi
addl 4(%esp),%edx
pxor %xmm6,%xmm4
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm7,48(%esp)
addl %ebp,%edx
andl %eax,%esi
movdqa %xmm4,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
movdqa %xmm4,%xmm6
xorl %ebx,%esi
pslldq $12,%xmm0
paddd %xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
psrld $31,%xmm6
xorl %eax,%edi
roll $5,%edx
movdqa %xmm0,%xmm7
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
psrld $30,%xmm0
addl %edx,%ecx
rorl $7,%edx
por %xmm6,%xmm4
xorl %eax,%ebp
movl %ecx,%esi
addl 12(%esp),%ebx
pslld $2,%xmm7
xorl %edi,%edx
roll $5,%ecx
pxor %xmm0,%xmm4
movdqa 96(%esp),%xmm0
addl %ebp,%ebx
andl %edx,%esi
pxor %xmm7,%xmm4
pshufd $238,%xmm1,%xmm5
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
xorl %edi,%esi
movl %ebx,%ebp
punpcklqdq %xmm2,%xmm5
movdqa %xmm4,%xmm7
addl 16(%esp),%eax
xorl %edx,%ecx
paddd %xmm4,%xmm0
movdqa %xmm1,80(%esp)
roll $5,%ebx
addl %esi,%eax
psrldq $4,%xmm7
andl %ecx,%ebp
xorl %edx,%ecx
pxor %xmm1,%xmm5
addl %ebx,%eax
rorl $7,%ebx
pxor %xmm3,%xmm7
xorl %edx,%ebp
movl %eax,%esi
addl 20(%esp),%edi
pxor %xmm7,%xmm5
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm0,(%esp)
addl %ebp,%edi
andl %ebx,%esi
movdqa %xmm5,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
movdqa %xmm5,%xmm7
xorl %ecx,%esi
pslldq $12,%xmm1
paddd %xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
psrld $31,%xmm7
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm1,%xmm0
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
psrld $30,%xmm1
addl %edi,%edx
rorl $7,%edi
por %xmm7,%xmm5
xorl %ebx,%ebp
movl %edx,%esi
addl 28(%esp),%ecx
pslld $2,%xmm0
xorl %eax,%edi
roll $5,%edx
pxor %xmm1,%xmm5
movdqa 112(%esp),%xmm1
addl %ebp,%ecx
andl %edi,%esi
pxor %xmm0,%xmm5
pshufd $238,%xmm2,%xmm6
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
punpcklqdq %xmm3,%xmm6
movdqa %xmm5,%xmm0
addl 32(%esp),%ebx
xorl %edi,%edx
paddd %xmm5,%xmm1
movdqa %xmm2,96(%esp)
roll $5,%ecx
addl %esi,%ebx
psrldq $4,%xmm0
andl %edx,%ebp
xorl %edi,%edx
pxor %xmm2,%xmm6
addl %ecx,%ebx
rorl $7,%ecx
pxor %xmm4,%xmm0
xorl %edi,%ebp
movl %ebx,%esi
addl 36(%esp),%eax
pxor %xmm0,%xmm6
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm1,16(%esp)
addl %ebp,%eax
andl %ecx,%esi
movdqa %xmm6,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
rorl $7,%ebx
movdqa %xmm6,%xmm0
xorl %edx,%esi
pslldq $12,%xmm2
paddd %xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
psrld $31,%xmm0
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm2,%xmm1
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
psrld $30,%xmm2
addl %eax,%edi
rorl $7,%eax
por %xmm0,%xmm6
xorl %ecx,%ebp
movdqa 64(%esp),%xmm0
movl %edi,%esi
addl 44(%esp),%edx
pslld $2,%xmm1
xorl %ebx,%eax
roll $5,%edi
pxor %xmm2,%xmm6
movdqa 112(%esp),%xmm2
addl %ebp,%edx
andl %eax,%esi
pxor %xmm1,%xmm6
pshufd $238,%xmm3,%xmm7
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%esi
movl %edx,%ebp
punpcklqdq %xmm4,%xmm7
movdqa %xmm6,%xmm1
addl 48(%esp),%ecx
xorl %eax,%edi
paddd %xmm6,%xmm2
movdqa %xmm3,64(%esp)
roll $5,%edx
addl %esi,%ecx
psrldq $4,%xmm1
andl %edi,%ebp
xorl %eax,%edi
pxor %xmm3,%xmm7
addl %edx,%ecx
rorl $7,%edx
pxor %xmm5,%xmm1
xorl %eax,%ebp
movl %ecx,%esi
addl 52(%esp),%ebx
pxor %xmm1,%xmm7
xorl %edi,%edx
roll $5,%ecx
movdqa %xmm2,32(%esp)
addl %ebp,%ebx
andl %edx,%esi
movdqa %xmm7,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
movdqa %xmm7,%xmm1
xorl %edi,%esi
pslldq $12,%xmm3
paddd %xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
psrld $31,%xmm1
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm3,%xmm2
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
psrld $30,%xmm3
addl %ebx,%eax
rorl $7,%ebx
por %xmm1,%xmm7
xorl %edx,%ebp
movdqa 80(%esp),%xmm1
movl %eax,%esi
addl 60(%esp),%edi
pslld $2,%xmm2
xorl %ecx,%ebx
roll $5,%eax
pxor %xmm3,%xmm7
movdqa 112(%esp),%xmm3
addl %ebp,%edi
andl %ebx,%esi
pxor %xmm2,%xmm7
pshufd $238,%xmm6,%xmm2
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
pxor %xmm1,%xmm0
movdqa %xmm4,80(%esp)
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm3,%xmm4
addl %esi,%edx
paddd %xmm7,%xmm3
andl %eax,%ebp
pxor %xmm2,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
roll $5,%edx
pslld $2,%xmm0
addl %ebp,%ecx
andl %edi,%esi
psrld $30,%xmm2
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
xorl %edi,%edx
roll $5,%ecx
por %xmm2,%xmm0
addl %esi,%ebx
andl %edx,%ebp
movdqa 96(%esp),%xmm2
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
pshufd $238,%xmm7,%xmm3
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 16(%esp),%edi
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm2,%xmm1
movdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm4,%xmm5
rorl $7,%ebx
paddd %xmm0,%xmm4
addl %eax,%edi
pxor %xmm3,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm3
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm3,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
movdqa 64(%esp),%xmm3
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
pshufd $238,%xmm0,%xmm4
addl %ecx,%ebx
addl 32(%esp),%eax
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
pxor %xmm3,%xmm2
movdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
movdqa 128(%esp),%xmm6
rorl $7,%ecx
paddd %xmm1,%xmm5
addl %ebx,%eax
pxor %xmm4,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
pslld $2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
psrld $30,%xmm4
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
por %xmm4,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
movdqa 80(%esp),%xmm4
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
pshufd $238,%xmm1,%xmm5
addl %edx,%ecx
addl 48(%esp),%ebx
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
pxor %xmm4,%xmm3
movdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
movdqa %xmm6,%xmm7
rorl $7,%edx
paddd %xmm2,%xmm6
addl %ecx,%ebx
pxor %xmm5,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
pslld $2,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
psrld $30,%xmm5
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
por %xmm5,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
movdqa 96(%esp),%xmm5
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
pshufd $238,%xmm2,%xmm6
addl %edi,%edx
addl (%esp),%ecx
pxor %xmm0,%xmm4
punpcklqdq %xmm3,%xmm6
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
pxor %xmm5,%xmm4
movdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
movdqa %xmm7,%xmm0
rorl $7,%edi
paddd %xmm3,%xmm7
addl %edx,%ecx
pxor %xmm6,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm4,%xmm6
movdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
pslld $2,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
psrld $30,%xmm6
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
por %xmm6,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
movdqa 64(%esp),%xmm6
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
pshufd $238,%xmm3,%xmm7
addl %eax,%edi
addl 16(%esp),%edx
pxor %xmm1,%xmm5
punpcklqdq %xmm4,%xmm7
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
pxor %xmm6,%xmm5
movdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
movdqa %xmm0,%xmm1
rorl $7,%eax
paddd %xmm4,%xmm0
addl %edi,%edx
pxor %xmm7,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
movdqa %xmm5,%xmm7
movdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
pslld $2,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
psrld $30,%xmm7
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
por %xmm7,%xmm5
addl 28(%esp),%eax
movdqa 80(%esp),%xmm7
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pshufd $238,%xmm4,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 32(%esp),%edi
pxor %xmm2,%xmm6
punpcklqdq %xmm5,%xmm0
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
pxor %xmm7,%xmm6
movdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
roll $5,%eax
movdqa %xmm1,%xmm2
addl %esi,%edi
paddd %xmm5,%xmm1
xorl %ebx,%ebp
pxor %xmm0,%xmm6
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
andl %ebx,%ebp
movdqa %xmm6,%xmm0
movdqa %xmm1,16(%esp)
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
pslld $2,%xmm6
addl %ebp,%edx
xorl %eax,%esi
psrld $30,%xmm0
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
por %xmm0,%xmm6
movl %edx,%ebp
xorl %eax,%esi
movdqa 96(%esp),%xmm0
roll $5,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
pshufd $238,%xmm5,%xmm1
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 48(%esp),%eax
pxor %xmm3,%xmm7
punpcklqdq %xmm6,%xmm1
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
pxor %xmm0,%xmm7
movdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
roll $5,%ebx
movdqa 144(%esp),%xmm3
addl %esi,%eax
paddd %xmm6,%xmm2
xorl %ecx,%ebp
pxor %xmm1,%xmm7
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
andl %ecx,%ebp
movdqa %xmm7,%xmm1
movdqa %xmm2,32(%esp)
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
pslld $2,%xmm7
addl %ebp,%edi
xorl %ebx,%esi
psrld $30,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
por %xmm1,%xmm7
movl %edi,%ebp
xorl %ebx,%esi
movdqa 64(%esp),%xmm1
roll $5,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
pshufd $238,%xmm6,%xmm2
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl (%esp),%ebx
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
pxor %xmm1,%xmm0
movdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
roll $5,%ecx
movdqa %xmm3,%xmm4
addl %esi,%ebx
paddd %xmm7,%xmm3
xorl %edx,%ebp
pxor %xmm2,%xmm0
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
andl %edx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pslld $2,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
psrld $30,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
por %xmm2,%xmm0
movl %eax,%ebp
xorl %ecx,%esi
movdqa 80(%esp),%xmm2
roll $5,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
pshufd $238,%xmm7,%xmm3
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 16(%esp),%ecx
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
pxor %xmm2,%xmm1
movdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
roll $5,%edx
movdqa %xmm4,%xmm5
addl %esi,%ecx
paddd %xmm0,%xmm4
xorl %edi,%ebp
pxor %xmm3,%xmm1
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
andl %edi,%ebp
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
pslld $2,%xmm1
addl %ebp,%ebx
xorl %edx,%esi
psrld $30,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
por %xmm3,%xmm1
movl %ebx,%ebp
xorl %edx,%esi
movdqa 96(%esp),%xmm3
roll $5,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
pshufd $238,%xmm0,%xmm4
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 32(%esp),%edx
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
pxor %xmm3,%xmm2
movdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
roll $5,%edi
movdqa %xmm5,%xmm6
addl %esi,%edx
paddd %xmm1,%xmm5
xorl %eax,%ebp
pxor %xmm4,%xmm2
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
andl %eax,%ebp
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
pslld $2,%xmm2
addl %ebp,%ecx
xorl %edi,%esi
psrld $30,%xmm4
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
por %xmm4,%xmm2
movl %ecx,%ebp
xorl %edi,%esi
movdqa 64(%esp),%xmm4
roll $5,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
pshufd $238,%xmm1,%xmm5
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
addl 48(%esp),%edi
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm4,%xmm3
movdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm6,%xmm7
rorl $7,%ebx
paddd %xmm2,%xmm6
addl %eax,%edi
pxor %xmm5,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm5
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm5,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl (%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
paddd %xmm3,%xmm7
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
movdqa %xmm7,48(%esp)
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je .L005done
movdqa 160(%esp),%xmm7
movdqa 176(%esp),%xmm6
movdqu (%ebp),%xmm0
movdqu 16(%ebp),%xmm1
movdqu 32(%ebp),%xmm2
movdqu 48(%ebp),%xmm3
addl $64,%ebp
.byte 102,15,56,0,198
movl %ebp,196(%esp)
movdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
.byte 102,15,56,0,206
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
paddd %xmm7,%xmm0
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
movdqa %xmm0,(%esp)
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
psubd %xmm7,%xmm0
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
.byte 102,15,56,0,214
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
paddd %xmm7,%xmm1
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
movdqa %xmm1,16(%esp)
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
psubd %xmm7,%xmm1
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
.byte 102,15,56,0,222
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
paddd %xmm7,%xmm2
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
movdqa %xmm2,32(%esp)
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
psubd %xmm7,%xmm2
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %ecx,%ebx
movl %edx,12(%ebp)
xorl %edx,%ebx
movl %edi,16(%ebp)
movl %esi,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebx,%esi
movl %ebp,%ebx
jmp .L004loop
.align 16
.L005done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size _sha1_block_data_order_ssse3,.-_sha1_block_data_order_ssse3
// -----------------------------------------------------------------------------
// _sha1_block_data_order_avx — SHA-1 block transform, AVX code path (i386).
// Generated by perlasm; do not hand-tune instruction order.
// C-equivalent: void sha1_block_data_order(uint32_t ctx[5],
//                                          const void *in, size_t num);
// ABI:  cdecl (i386). After the four pushes below, the arguments sit at
//       20(%esp)=ctx, 24(%esp)=in, 28(%esp)=num (64-byte blocks).
// Frame (after sub/and below): 208-byte, 64-byte-aligned scratch area holding
//       the precomputed W[i]+K values (0..48(%esp)), saved X vectors
//       (64/80/96(%esp)), K constants (112..160(%esp)), the pshufb
//       byte-swap mask (176(%esp)), and saved ctx/in/end/esp at
//       192/196/200/204(%esp).
// -----------------------------------------------------------------------------
.hidden _sha1_block_data_order_avx
.type _sha1_block_data_order_avx,@function
.align 16
_sha1_block_data_order_avx:
// Standard prologue: preserve the i386 callee-saved registers.
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
// PIC idiom: call/pop to obtain the current address, then point %ebp at the
// constant pool .LK_XX_XX.
call .L006pic_point
.L006pic_point:
popl %ebp
leal .LK_XX_XX-.L006pic_point(%ebp),%ebp
.Lavx_shortcut:
// Clear all ymm state (avoids AVX/SSE transition penalties), then load the
// four SHA-1 round constants and the big-endian byte-swap mask.
vzeroall
vmovdqa (%ebp),%xmm7
vmovdqa 16(%ebp),%xmm0
vmovdqa 32(%ebp),%xmm1
vmovdqa 48(%ebp),%xmm2
vmovdqa 64(%ebp),%xmm6
// Fetch arguments: %edi=ctx, %ebp=input, %edx=block count.
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
// Carve out the 64-byte-aligned 208-byte frame; %esi keeps the caller %esp.
subl $208,%esp
andl $-64,%esp
// Stash K constants and byte-swap mask in the frame for the duration.
vmovdqa %xmm0,112(%esp)
vmovdqa %xmm1,128(%esp)
vmovdqa %xmm2,144(%esp)
shll $6,%edx
vmovdqa %xmm7,160(%esp)
addl %ebp,%edx
vmovdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
// Load the chaining state h0..h4 into eax/ebx/ecx/edx/edi.
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
// Load the first 64-byte block, byte-swap each 32-bit word (vpshufb with the
// mask in %xmm6), and precompute W[0..11]+K00_19 into the frame.
vmovdqu -64(%ebp),%xmm0
vmovdqu -48(%ebp),%xmm1
vmovdqu -32(%ebp),%xmm2
vmovdqu -16(%ebp),%xmm3
vpshufb %xmm6,%xmm0,%xmm0
vpshufb %xmm6,%xmm1,%xmm1
vpshufb %xmm6,%xmm2,%xmm2
vmovdqa %xmm7,96(%esp)
vpshufb %xmm6,%xmm3,%xmm3
vpaddd %xmm7,%xmm0,%xmm4
vpaddd %xmm7,%xmm1,%xmm5
vpaddd %xmm7,%xmm2,%xmm6
vmovdqa %xmm4,(%esp)
movl %ecx,%ebp
vmovdqa %xmm5,16(%esp)
xorl %edx,%ebp
vmovdqa %xmm6,32(%esp)
andl %ebp,%esi
jmp .L007loop
.align 16
// Main per-block loop: scalar SHA-1 rounds interleaved with the AVX message
// schedule (vpalignr/vpsrldq/vpxor/vpslld/vpor compute W[t]=rol(...,1) four
// words at a time, one vector ahead of the rounds that consume it).
.L007loop:
shrdl $2,%ebx,%ebx
xorl %edx,%esi
vpalignr $8,%xmm0,%xmm1,%xmm4
movl %eax,%ebp
addl (%esp),%edi
vpaddd %xmm3,%xmm7,%xmm7
vmovdqa %xmm0,64(%esp)
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrldq $4,%xmm3,%xmm6
addl %esi,%edi
andl %ebx,%ebp
vpxor %xmm0,%xmm4,%xmm4
xorl %ecx,%ebx
addl %eax,%edi
vpxor %xmm2,%xmm6,%xmm6
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vmovdqa %xmm7,48(%esp)
movl %edi,%esi
addl 4(%esp),%edx
vpxor %xmm6,%xmm4,%xmm4
xorl %ebx,%eax
shldl $5,%edi,%edi
addl %ebp,%edx
andl %eax,%esi
vpsrld $31,%xmm4,%xmm6
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpslldq $12,%xmm4,%xmm0
vpaddd %xmm4,%xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrld $30,%xmm0,%xmm7
vpor %xmm6,%xmm4,%xmm4
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
vpslld $2,%xmm0,%xmm0
shrdl $7,%edx,%edx
xorl %eax,%ebp
vpxor %xmm7,%xmm4,%xmm4
movl %ecx,%esi
addl 12(%esp),%ebx
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpxor %xmm0,%xmm4,%xmm4
addl %ebp,%ebx
andl %edx,%esi
vmovdqa 96(%esp),%xmm0
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpalignr $8,%xmm1,%xmm2,%xmm5
movl %ebx,%ebp
addl 16(%esp),%eax
vpaddd %xmm4,%xmm0,%xmm0
vmovdqa %xmm1,80(%esp)
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrldq $4,%xmm4,%xmm7
addl %esi,%eax
andl %ecx,%ebp
vpxor %xmm1,%xmm5,%xmm5
xorl %edx,%ecx
addl %ebx,%eax
vpxor %xmm3,%xmm7,%xmm7
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vmovdqa %xmm0,(%esp)
movl %eax,%esi
addl 20(%esp),%edi
vpxor %xmm7,%xmm5,%xmm5
xorl %ecx,%ebx
shldl $5,%eax,%eax
addl %ebp,%edi
andl %ebx,%esi
vpsrld $31,%xmm5,%xmm7
xorl %ecx,%ebx
addl %eax,%edi
shrdl $7,%eax,%eax
xorl %ecx,%esi
vpslldq $12,%xmm5,%xmm1
vpaddd %xmm5,%xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm0
vpor %xmm7,%xmm5,%xmm5
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpxor %xmm0,%xmm5,%xmm5
movl %edx,%esi
addl 28(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpxor %xmm1,%xmm5,%xmm5
addl %ebp,%ecx
andl %edi,%esi
vmovdqa 112(%esp),%xmm1
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
vpalignr $8,%xmm2,%xmm3,%xmm6
movl %ecx,%ebp
addl 32(%esp),%ebx
vpaddd %xmm5,%xmm1,%xmm1
vmovdqa %xmm2,96(%esp)
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpsrldq $4,%xmm5,%xmm0
addl %esi,%ebx
andl %edx,%ebp
vpxor %xmm2,%xmm6,%xmm6
xorl %edi,%edx
addl %ecx,%ebx
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%ecx,%ecx
xorl %edi,%ebp
vmovdqa %xmm1,16(%esp)
movl %ebx,%esi
addl 36(%esp),%eax
vpxor %xmm0,%xmm6,%xmm6
xorl %edx,%ecx
shldl $5,%ebx,%ebx
addl %ebp,%eax
andl %ecx,%esi
vpsrld $31,%xmm6,%xmm0
xorl %edx,%ecx
addl %ebx,%eax
shrdl $7,%ebx,%ebx
xorl %edx,%esi
vpslldq $12,%xmm6,%xmm2
vpaddd %xmm6,%xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm1
vpor %xmm0,%xmm6,%xmm6
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
vmovdqa 64(%esp),%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vpxor %xmm1,%xmm6,%xmm6
movl %edi,%esi
addl 44(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpxor %xmm2,%xmm6,%xmm6
addl %ebp,%edx
andl %eax,%esi
vmovdqa 112(%esp),%xmm2
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpalignr $8,%xmm3,%xmm4,%xmm7
movl %edx,%ebp
addl 48(%esp),%ecx
vpaddd %xmm6,%xmm2,%xmm2
vmovdqa %xmm3,64(%esp)
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrldq $4,%xmm6,%xmm1
addl %esi,%ecx
andl %edi,%ebp
vpxor %xmm3,%xmm7,%xmm7
xorl %eax,%edi
addl %edx,%ecx
vpxor %xmm5,%xmm1,%xmm1
shrdl $7,%edx,%edx
xorl %eax,%ebp
vmovdqa %xmm2,32(%esp)
movl %ecx,%esi
addl 52(%esp),%ebx
vpxor %xmm1,%xmm7,%xmm7
xorl %edi,%edx
shldl $5,%ecx,%ecx
addl %ebp,%ebx
andl %edx,%esi
vpsrld $31,%xmm7,%xmm1
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpslldq $12,%xmm7,%xmm3
vpaddd %xmm7,%xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm2
vpor %xmm1,%xmm7,%xmm7
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
vmovdqa 80(%esp),%xmm1
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vpxor %xmm2,%xmm7,%xmm7
movl %eax,%esi
addl 60(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpxor %xmm3,%xmm7,%xmm7
addl %ebp,%edi
andl %ebx,%esi
vmovdqa 112(%esp),%xmm3
xorl %ecx,%ebx
addl %eax,%edi
// From here the schedule switches to the 4-word-stride form
// (vpalignr of the two newest vectors + xor of older saved vectors).
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,80(%esp)
xorl %ebx,%eax
shldl $5,%edi,%edi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
addl %esi,%edx
andl %eax,%ebp
vpxor %xmm2,%xmm0,%xmm0
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpslld $2,%xmm0,%xmm0
addl %ebp,%ecx
andl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
vpor %xmm2,%xmm0,%xmm0
xorl %edi,%edx
shldl $5,%ecx,%ecx
vmovdqa 96(%esp),%xmm2
addl %esi,%ebx
andl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm3,%xmm1,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm3,%xmm1,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
vmovdqa 64(%esp),%xmm3
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
vmovdqa 128(%esp),%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpxor %xmm4,%xmm2,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vpor %xmm4,%xmm2,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
vmovdqa 80(%esp),%xmm4
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
vmovdqa 96(%esp),%xmm5
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpalignr $8,%xmm2,%xmm3,%xmm6
vpxor %xmm0,%xmm4,%xmm4
addl (%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
vpxor %xmm5,%xmm4,%xmm4
vmovdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
vmovdqa %xmm7,%xmm0
vpaddd %xmm3,%xmm7,%xmm7
shrdl $7,%edi,%edi
addl %edx,%ecx
vpxor %xmm6,%xmm4,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
vpsrld $30,%xmm4,%xmm6
vmovdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpslld $2,%xmm4,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpor %xmm6,%xmm4,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
vmovdqa 64(%esp),%xmm6
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpalignr $8,%xmm3,%xmm4,%xmm7
vpxor %xmm1,%xmm5,%xmm5
addl 16(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
vpxor %xmm6,%xmm5,%xmm5
vmovdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
vmovdqa %xmm0,%xmm1
vpaddd %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
addl %edi,%edx
vpxor %xmm7,%xmm5,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
vpsrld $30,%xmm5,%xmm7
vmovdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpslld $2,%xmm5,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpor %xmm7,%xmm5,%xmm5
addl 28(%esp),%eax
vmovdqa 80(%esp),%xmm7
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
// Rounds 40-59 use the majority function: note the extra and/xor mix below.
vpalignr $8,%xmm4,%xmm5,%xmm0
vpxor %xmm2,%xmm6,%xmm6
addl 32(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vpxor %xmm7,%xmm6,%xmm6
vmovdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
vmovdqa %xmm1,%xmm2
vpaddd %xmm5,%xmm1,%xmm1
shldl $5,%eax,%eax
addl %esi,%edi
vpxor %xmm0,%xmm6,%xmm6
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
vpsrld $30,%xmm6,%xmm0
vmovdqa %xmm1,16(%esp)
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
vpslld $2,%xmm6,%xmm6
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
vpor %xmm0,%xmm6,%xmm6
xorl %ebx,%eax
shrdl $7,%edi,%edi
vmovdqa 96(%esp),%xmm0
movl %edx,%ebp
xorl %eax,%esi
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
vpalignr $8,%xmm5,%xmm6,%xmm1
vpxor %xmm3,%xmm7,%xmm7
addl 48(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vpxor %xmm0,%xmm7,%xmm7
vmovdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
vmovdqa 144(%esp),%xmm3
vpaddd %xmm6,%xmm2,%xmm2
shldl $5,%ebx,%ebx
addl %esi,%eax
vpxor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
vpsrld $30,%xmm7,%xmm1
vmovdqa %xmm2,32(%esp)
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
vpslld $2,%xmm7,%xmm7
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
vpor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vmovdqa 64(%esp),%xmm1
movl %edi,%ebp
xorl %ebx,%esi
shldl $5,%edi,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
addl (%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
shrdl $7,%edx,%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
shldl $5,%ecx,%ecx
addl %esi,%ebx
vpxor %xmm2,%xmm0,%xmm0
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
vpslld $2,%xmm0,%xmm0
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
vpor %xmm2,%xmm0,%xmm0
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vmovdqa 80(%esp),%xmm2
movl %eax,%ebp
xorl %ecx,%esi
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
shrdl $7,%edi,%edi
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shldl $5,%edx,%edx
addl %esi,%ecx
vpxor %xmm3,%xmm1,%xmm1
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
vpslld $2,%xmm1,%xmm1
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
vpor %xmm3,%xmm1,%xmm1
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vmovdqa 96(%esp),%xmm3
movl %ebx,%ebp
xorl %edx,%esi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
vmovdqa %xmm5,%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shldl $5,%edi,%edi
addl %esi,%edx
vpxor %xmm4,%xmm2,%xmm2
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
vpslld $2,%xmm2,%xmm2
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
vpor %xmm4,%xmm2,%xmm2
xorl %eax,%edi
shrdl $7,%edx,%edx
vmovdqa 64(%esp),%xmm4
movl %ecx,%ebp
xorl %edi,%esi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
// Rounds 60-79 (parity function again); schedule winds down.
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl (%esp),%eax
vpaddd %xmm3,%xmm7,%xmm7
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
vmovdqa %xmm7,48(%esp)
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
// More input? If not, take the tail path that finishes the last 16 rounds.
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je .L008done
// Load and byte-swap the next 64-byte block while the remaining rounds of
// the current block are computed (software pipelining).
vmovdqa 160(%esp),%xmm7
vmovdqa 176(%esp),%xmm6
vmovdqu (%ebp),%xmm0
vmovdqu 16(%ebp),%xmm1
vmovdqu 32(%ebp),%xmm2
vmovdqu 48(%ebp),%xmm3
addl $64,%ebp
vpshufb %xmm6,%xmm0,%xmm0
movl %ebp,196(%esp)
vmovdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
vpshufb %xmm6,%xmm1,%xmm1
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpaddd %xmm7,%xmm0,%xmm4
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vmovdqa %xmm4,(%esp)
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
vpshufb %xmm6,%xmm2,%xmm2
movl %edx,%ebp
shldl $5,%edx,%edx
vpaddd %xmm7,%xmm1,%xmm5
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vmovdqa %xmm5,16(%esp)
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
vpshufb %xmm6,%xmm3,%xmm3
movl %edi,%ebp
shldl $5,%edi,%edi
vpaddd %xmm7,%xmm2,%xmm6
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vmovdqa %xmm6,32(%esp)
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
// Fold the working registers back into the chaining state at ctx
// (192(%esp)) and iterate on the freshly-loaded block.
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,%ebx
movl %ecx,8(%ebp)
xorl %edx,%ebx
movl %edx,12(%ebp)
movl %edi,16(%ebp)
movl %esi,%ebp
andl %ebx,%esi
movl %ebp,%ebx
jmp .L007loop
.align 16
// Tail path: no more input — finish the final 16 rounds without scheduling
// a next block, then write back state and return.
.L008done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
// Scrub vector registers (key-dependent data) before returning.
vzeroall
// Final state update, restore the caller's %esp saved at 204(%esp),
// then standard epilogue.
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size _sha1_block_data_order_avx,.-_sha1_block_data_order_avx
// Constant pool shared by the SSSE3/AVX SHA-1 paths (addressed PC-relative
// via the pic_point idiom above). 64-byte aligned so each 16-byte entry is
// safe for vmovdqa/movdqa loads.
.align 64
.LK_XX_XX:
// SHA-1 round constants, replicated across all four lanes:
// 0x5a827999 (rounds 0-19)
.long 1518500249,1518500249,1518500249,1518500249
// 0x6ed9eba1 (rounds 20-39)
.long 1859775393,1859775393,1859775393,1859775393
// 0x8f1bbcdc (rounds 40-59)
.long 2400959708,2400959708,2400959708,2400959708
// 0xca62c1d6 (rounds 60-79)
.long 3395469782,3395469782,3395469782,3395469782
// 0x00010203,0x04050607,... : pshufb mask used to byte-swap each 32-bit
// message word from big-endian input to host order.
.long 66051,67438087,134810123,202182159
// Descending byte indices 15..0 (an additional 16-byte shuffle constant).
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
// ASCII identification string:
// "SHA1 block transform for x86, CRYPTOGAMS by <appro@openssl.org>\0"
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115
.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82
.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112
.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- file boundary (extraction artifact repaired) -------------------------
// The following content is a separate source file concatenated here:
//   path: deps/boringssl/linux-x86/crypto/fipsmodule/sha256-586-linux.S
//   origin repository: weix2025/toy   (recorded original size: 97992 bytes)
// ---------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha256_block_data_order
.hidden sha256_block_data_order
.type sha256_block_data_order,@function
.align 16
sha256_block_data_order:
.L_sha256_block_data_order_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call .L000pic_point
.L000pic_point:
popl %ebp
leal .L001K256-.L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal OPENSSL_ia32cap_P-.L001K256(%ebp),%edx
movl (%edx),%ecx
movl 4(%edx),%ebx
testl $1048576,%ecx
jnz .L002loop
movl 8(%edx),%edx
testl $16777216,%ecx
jz .L003no_xmm
andl $1073741824,%ecx
andl $268435968,%ebx
orl %ebx,%ecx
andl $1342177280,%ecx
cmpl $1342177280,%ecx
je .L004AVX
testl $512,%ebx
jnz .L005SSSE3
.L003no_xmm:
subl %edi,%eax
cmpl $256,%eax
jae .L006unrolled
jmp .L002loop
.align 16
.L002loop:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
bswap %eax
movl 12(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
bswap %eax
movl 28(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %eax
movl 44(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
bswap %eax
movl 60(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
addl $64,%edi
leal -36(%esp),%esp
movl %edi,104(%esp)
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,8(%esp)
xorl %ecx,%ebx
movl %ecx,12(%esp)
movl %edi,16(%esp)
movl %ebx,(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
movl %edi,32(%esp)
.align 16
.L00700_15:
movl %edx,%ecx
movl 24(%esp),%esi
rorl $14,%ecx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl 96(%esp),%ebx
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
addl $4,%ebp
addl %ebx,%eax
cmpl $3248222580,%esi
jne .L00700_15
movl 156(%esp),%ecx
jmp .L00816_63
.align 16
.L00816_63:
movl %ecx,%ebx
movl 104(%esp),%esi
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 160(%esp),%ebx
shrl $10,%edi
addl 124(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 24(%esp),%esi
rorl $14,%ecx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl %ebx,96(%esp)
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
movl 156(%esp),%ecx
addl $4,%ebp
addl %ebx,%eax
cmpl $3329325298,%esi
jne .L00816_63
movl 356(%esp),%esi
movl 8(%esp),%ebx
movl 16(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl 24(%esp),%eax
movl 28(%esp),%ebx
movl 32(%esp),%ecx
movl 360(%esp),%edi
addl 16(%esi),%edx
addl 20(%esi),%eax
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %eax,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
leal 356(%esp),%esp
subl $256,%ebp
cmpl 8(%esp),%edi
jb .L002loop
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 64
.L001K256:
.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298
.long 66051,67438087,134810123,202182159
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
.align 16
.L006unrolled:
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebp
movl 8(%esi),%ecx
movl 12(%esi),%ebx
movl %ebp,4(%esp)
xorl %ecx,%ebp
movl %ecx,8(%esp)
movl %ebx,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %esi,28(%esp)
jmp .L009grand_loop
.align 16
.L009grand_loop:
movl (%edi),%ebx
movl 4(%edi),%ecx
bswap %ebx
movl 8(%edi),%esi
bswap %ecx
movl %ebx,32(%esp)
bswap %esi
movl %ecx,36(%esp)
movl %esi,40(%esp)
movl 12(%edi),%ebx
movl 16(%edi),%ecx
bswap %ebx
movl 20(%edi),%esi
bswap %ecx
movl %ebx,44(%esp)
bswap %esi
movl %ecx,48(%esp)
movl %esi,52(%esp)
movl 24(%edi),%ebx
movl 28(%edi),%ecx
bswap %ebx
movl 32(%edi),%esi
bswap %ecx
movl %ebx,56(%esp)
bswap %esi
movl %ecx,60(%esp)
movl %esi,64(%esp)
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %ebx
movl 44(%edi),%esi
bswap %ecx
movl %ebx,68(%esp)
bswap %esi
movl %ecx,72(%esp)
movl %esi,76(%esp)
movl 48(%edi),%ebx
movl 52(%edi),%ecx
bswap %ebx
movl 56(%edi),%esi
bswap %ecx
movl %ebx,80(%esp)
bswap %esi
movl %ecx,84(%esp)
movl %esi,88(%esp)
movl 60(%edi),%ebx
addl $64,%edi
bswap %ebx
movl %edi,100(%esp)
movl %ebx,92(%esp)
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 32(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1116352408(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 36(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1899447441(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 40(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3049323471(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 44(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3921009573(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 48(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 961987163(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 52(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1508970993(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 56(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2453635748(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 60(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2870763221(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 64(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3624381080(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 68(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 310598401(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 72(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 607225278(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 76(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1426881987(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 80(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1925078388(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 84(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2162078206(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 88(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2614888103(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 92(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3248222580(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3835390401(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 4022224774(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 264347078(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 604807628(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 770255983(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1249150122(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1555081692(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1996064986(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2554220882(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2821834349(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2952996808(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3210313671(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3336571891(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3584528711(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 113926993(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 338241895(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 666307205(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 773529912(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1294757372(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1396182291(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1695183700(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1986661051(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2177026350(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2456956037(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2730485921(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2820302411(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3259730800(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3345764771(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3516065817(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3600352804(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 4094571909(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 275423344(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 430227734(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 506948616(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 659060556(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 883997877(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 958139571(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1322822218(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1537002063(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1747873779(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1955562222(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2024104815(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2227730452(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2361852424(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2428436474(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2756734187(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3204031479(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3329325298(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 96(%esp),%esi
xorl %edi,%ebp
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebp
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebp,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebp,4(%esp)
xorl %edi,%ebp
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
cmpl 104(%esp),%edi
jb .L009grand_loop
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 32
.L005SSSE3:
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
movdqa 256(%ebp),%xmm7
jmp .L010grand_ssse3
.align 16
.L010grand_ssse3:
movdqu (%edi),%xmm0
movdqu 16(%edi),%xmm1
movdqu 32(%edi),%xmm2
movdqu 48(%edi),%xmm3
addl $64,%edi
.byte 102,15,56,0,199
movl %edi,100(%esp)
.byte 102,15,56,0,207
movdqa (%ebp),%xmm4
.byte 102,15,56,0,215
movdqa 16(%ebp),%xmm5
paddd %xmm0,%xmm4
.byte 102,15,56,0,223
movdqa 32(%ebp),%xmm6
paddd %xmm1,%xmm5
movdqa 48(%ebp),%xmm7
movdqa %xmm4,32(%esp)
paddd %xmm2,%xmm6
movdqa %xmm5,48(%esp)
paddd %xmm3,%xmm7
movdqa %xmm6,64(%esp)
movdqa %xmm7,80(%esp)
jmp .L011ssse3_00_47
.align 16
.L011ssse3_00_47:
addl $64,%ebp
movl %edx,%ecx
movdqa %xmm1,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
.byte 102,15,58,15,224,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,250,4
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm3,%xmm7
xorl %esi,%ecx
addl 32(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm0
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm0
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm0,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa (%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm0,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,32(%esp)
movl %edx,%ecx
movdqa %xmm2,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,225,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,251,4
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm0,%xmm7
xorl %esi,%ecx
addl 48(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm1
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm1
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm1,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 16(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm1,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,48(%esp)
movl %edx,%ecx
movdqa %xmm3,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
.byte 102,15,58,15,226,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,248,4
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm1,%xmm7
xorl %esi,%ecx
addl 64(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm2
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm2
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm2,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 32(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm2,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,64(%esp)
movl %edx,%ecx
movdqa %xmm0,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,227,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,249,4
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm2,%xmm7
xorl %esi,%ecx
addl 80(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm3
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm3
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm3,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 48(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm3,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,80(%esp)
cmpl $66051,64(%ebp)
jne .L011ssse3_00_47
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
movl 100(%esp),%edi
movdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb .L010grand_ssse3
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 32
.L004AVX:
leal -96(%esp),%esp
vzeroall
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
vmovdqa 256(%ebp),%xmm7
jmp .L012grand_avx
.align 32
.L012grand_avx:
vmovdqu (%edi),%xmm0
vmovdqu 16(%edi),%xmm1
vmovdqu 32(%edi),%xmm2
vmovdqu 48(%edi),%xmm3
addl $64,%edi
vpshufb %xmm7,%xmm0,%xmm0
movl %edi,100(%esp)
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd (%ebp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 16(%ebp),%xmm1,%xmm5
vpaddd 32(%ebp),%xmm2,%xmm6
vpaddd 48(%ebp),%xmm3,%xmm7
vmovdqa %xmm4,32(%esp)
vmovdqa %xmm5,48(%esp)
vmovdqa %xmm6,64(%esp)
vmovdqa %xmm7,80(%esp)
jmp .L013avx_00_47
.align 16
.L013avx_00_47:
addl $64,%ebp
vpalignr $4,%xmm0,%xmm1,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm2,%xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm3,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm0,%xmm0
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm0,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm0,%xmm0
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd (%ebp),%xmm0,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,32(%esp)
vpalignr $4,%xmm1,%xmm2,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm3,%xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm0,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm1,%xmm1
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm1,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm1,%xmm1
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 16(%ebp),%xmm1,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,48(%esp)
vpalignr $4,%xmm2,%xmm3,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm0,%xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm1,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm2,%xmm2
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm2,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm2,%xmm2
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd 32(%ebp),%xmm2,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,64(%esp)
vpalignr $4,%xmm3,%xmm0,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm1,%xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm2,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm3,%xmm3
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm3,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm3,%xmm3
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 48(%ebp),%xmm3,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,80(%esp)
cmpl $66051,64(%ebp)
jne .L013avx_00_47
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
movl 100(%esp),%edi
vmovdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb .L012grand_avx
movl 108(%esp),%esp
vzeroall
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha256_block_data_order,.-.L_sha256_block_data_order_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- concatenation artifact: bare "repo | size | path" separator row
// from the extraction tool (would not assemble as written).
// Provenance of the following section:
//   repo: weix2025/toy, size: 5,668 bytes,
//   path: deps/boringssl/linux-x86/crypto/fipsmodule/ghash-ssse3-x86-linux.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
// void gcm_gmult_ssse3(uint64_t Xi[2], const u128 Htable[16])
//
// GHASH single-block multiply, Xi <- Xi * H in GF(2^128), using SSSE3
// pshufb table lookups: the 16 Htable rows are walked linearly, so no
// memory address ever depends on secret data (constant-time lookup).
// cdecl stack args after the four pushes below:
//   20(%esp) = Xi      (in/out, 16-byte hash state)
//   24(%esp) = Htable  (16 rows of 16 bytes, consumed front to back)
// The ".byte" sequences are SSSE3 instructions spelled out for old
// assemblers; their decodings are noted inline.
.globl gcm_gmult_ssse3
.hidden gcm_gmult_ssse3
.type gcm_gmult_ssse3,@function
.align 16
gcm_gmult_ssse3:
.L_gcm_gmult_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi                      // edi = Xi
movl 24(%esp),%esi                      // esi = Htable
movdqu (%edi),%xmm0                     // xmm0 = Xi (caller's buffer may be unaligned)
// Classic 32-bit PIC idiom: call/pop to obtain our own address in %eax.
call .L000pic_point
.L000pic_point:
popl %eax
movdqa .Lreverse_bytes-.L000pic_point(%eax),%xmm7
movdqa .Llow4_mask-.L000pic_point(%eax),%xmm2
.byte 102,15,56,0,199                   // pshufb %xmm7,%xmm0 (byte-reverse Xi)
// Split the state into nibbles: xmm1 = high nibbles >> 4, xmm0 = low nibbles.
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2                        // xmm2 = running product, low part
pxor %xmm3,%xmm3                        // xmm3 = running product, carry part
// The 16 Htable rows are processed in runs of 5 + 5 + 6, with a partial
// reduction of the carry word between runs.
movl $5,%eax
.L001loop_row_1:
movdqa (%esi),%xmm4                     // xmm4 = next Htable row
leal 16(%esi),%esi
// Shift the accumulator right by one byte, capturing the byte shifted
// out into the carry register.
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
// Table lookups keyed by the current low/high nibbles of the state.
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
// Propagate the row's low 4 bits across the 64-bit lane boundary.
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L001loop_row_1
// Partial reduction: fold carry xmm3 back into xmm2 as
// x ^ (x>>1) ^ (x>>2) ^ (x>>7) (cumulative shifts 1, 1, 5), then clear it.
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
// Second run: 5 more rows, identical per-row step.
movl $5,%eax
.L002loop_row_2:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L002loop_row_2
// Partial reduction again (same fold as above).
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
// Final run: the remaining 6 rows.
movl $6,%eax
.L003loop_row_3:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L003loop_row_3
// Final reduction, then restore byte order and store the result.
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,0,215                   // pshufb %xmm7,%xmm2 (un-reverse bytes)
movdqu %xmm2,(%edi)                     // Xi = product
// Zero every XMM register we touched before returning (presumably to
// avoid leaving key-derived material in registers -- NOTE(review):
// matches the generator's sanitization convention, confirm upstream).
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_gmult_ssse3,.-.L_gcm_gmult_ssse3_begin
// void gcm_ghash_ssse3(uint64_t Xi[2], const u128 Htable[16],
//                      const uint8_t *in, size_t len)
//
// GHASH over a whole buffer: for each 16-byte block,
// Xi <- (Xi ^ block) * H in GF(2^128), using the same constant-time
// SSSE3 nibble-table technique as gcm_gmult_ssse3 above.
// cdecl stack args after the four pushes below:
//   20(%esp) = Xi, 24(%esp) = Htable, 28(%esp) = in, 32(%esp) = len
// len is rounded down to a multiple of 16 before processing.
.globl gcm_ghash_ssse3
.hidden gcm_ghash_ssse3
.type gcm_ghash_ssse3,@function
.align 16
gcm_ghash_ssse3:
.L_gcm_ghash_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi                      // edi = Xi
movl 24(%esp),%esi                      // esi = Htable
movl 28(%esp),%edx                      // edx = in
movl 32(%esp),%ecx                      // ecx = len
movdqu (%edi),%xmm0                     // xmm0 = Xi
// PIC address discovery, as in gcm_gmult_ssse3.
call .L004pic_point
.L004pic_point:
popl %ebx
movdqa .Lreverse_bytes-.L004pic_point(%ebx),%xmm7
andl $-16,%ecx                          // process only whole 16-byte blocks
.byte 102,15,56,0,199                   // pshufb %xmm7,%xmm0 (byte-reverse Xi)
pxor %xmm3,%xmm3
.L005loop_ghash:
// Absorb one input block: Xi ^= byte-reversed block, then split into nibbles.
movdqa .Llow4_mask-.L004pic_point(%ebx),%xmm2
movdqu (%edx),%xmm1
.byte 102,15,56,0,207                   // pshufb %xmm7,%xmm1 (byte-reverse block)
pxor %xmm1,%xmm0
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1                          // xmm1 = high nibbles
pand %xmm2,%xmm0                        // xmm0 = low nibbles
pxor %xmm2,%xmm2                        // xmm2 = running product, low part
// Rows processed in runs of 5 + 5 + 6 with partial reductions between,
// exactly mirroring gcm_gmult_ssse3.
movl $5,%eax
.L006loop_row_4:
movdqa (%esi),%xmm4                     // xmm4 = next Htable row
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L006loop_row_4
// Partial reduction: x ^ (x>>1) ^ (x>>2) ^ (x>>7) fold of the carry.
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
.L007loop_row_5:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L007loop_row_5
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $6,%eax
.L008loop_row_6:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1                // palignr $1,%xmm3,%xmm6
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224                   // pshufb %xmm0,%xmm4
.byte 102,15,56,0,233                   // pshufb %xmm1,%xmm5
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L008loop_row_6
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
// Block done: product becomes the new state; rewind Htable
// (16 rows x 16 bytes = 256), advance input, count down length.
movdqa %xmm2,%xmm0
leal -256(%esi),%esi
leal 16(%edx),%edx
subl $16,%ecx
jnz .L005loop_ghash
.byte 102,15,56,0,199                   // pshufb %xmm7,%xmm0 (un-reverse bytes)
movdqu %xmm0,(%edi)                     // store final Xi
// Zero every XMM register we touched before returning (presumably to
// avoid leaving key-derived material in registers -- NOTE(review):
// matches the generator's sanitization convention, confirm upstream).
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_ssse3,.-.L_gcm_ghash_ssse3_begin
.align 16
// pshufb control mask that reverses all 16 bytes of an XMM register;
// the GHASH routines above run with the state in reversed byte order.
.Lreverse_bytes:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.align 16
// 252645135 == 0x0f0f0f0f: selects the low nibble of every byte
// (and, via pandn, its complement selects the high nibble).
.Llow4_mask:
.long 252645135,252645135,252645135,252645135
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- concatenation artifact: bare "repo | size | path" separator row
// from the extraction tool (would not assemble as written).
// Provenance of the following section:
//   repo: weix2025/toy, size: 49,847 bytes,
//   path: deps/boringssl/linux-x86/crypto/fipsmodule/sha512-586-linux.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,@function
.align 16
sha512_block_data_order:
.L_sha512_block_data_order_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call .L000pic_point
.L000pic_point:
popl %ebp
leal .L001K512-.L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $7,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal OPENSSL_ia32cap_P-.L001K512(%ebp),%edx
movl (%edx),%ecx
testl $67108864,%ecx
jz .L002loop_x86
movl 4(%edx),%edx
movq (%esi),%mm0
andl $16777216,%ecx
movq 8(%esi),%mm1
andl $512,%edx
movq 16(%esi),%mm2
orl %edx,%ecx
movq 24(%esi),%mm3
movq 32(%esi),%mm4
movq 40(%esi),%mm5
movq 48(%esi),%mm6
movq 56(%esi),%mm7
cmpl $16777728,%ecx
je .L003SSSE3
subl $80,%esp
jmp .L004loop_sse2
.align 16
.L004loop_sse2:
movq %mm1,8(%esp)
movq %mm2,16(%esp)
movq %mm3,24(%esp)
movq %mm5,40(%esp)
movq %mm6,48(%esp)
pxor %mm1,%mm2
movq %mm7,56(%esp)
movq %mm0,%mm3
movl (%edi),%eax
movl 4(%edi),%ebx
addl $8,%edi
movl $15,%edx
bswap %eax
bswap %ebx
jmp .L00500_14_sse2
.align 16
.L00500_14_sse2:
movd %eax,%mm1
movl (%edi),%eax
movd %ebx,%mm7
movl 4(%edi),%ebx
addl $8,%edi
bswap %eax
bswap %ebx
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
movq 48(%esp),%mm6
decl %edx
jnz .L00500_14_sse2
movd %eax,%mm1
movd %ebx,%mm7
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
pxor %mm0,%mm0
movl $32,%edx
jmp .L00616_79_sse2
.align 16
.L00616_79_sse2:
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm0
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm2
addl $8,%ebp
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm2
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm0
addl $8,%ebp
decl %edx
jnz .L00616_79_sse2
paddq %mm3,%mm0
movq 8(%esp),%mm1
movq 24(%esp),%mm3
movq 40(%esp),%mm5
movq 48(%esp),%mm6
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movl $640,%eax
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
leal (%esp,%eax,1),%esp
subl %eax,%ebp
cmpl 88(%esp),%edi
jb .L004loop_sse2
movl 92(%esp),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 32
.L003SSSE3:
leal -64(%esp),%edx
subl $256,%esp
movdqa 640(%ebp),%xmm1
movdqu (%edi),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%edi),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%edi),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%edi),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%edi),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%edi),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%edi),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%edi),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movdqa %xmm2,-16(%edx)
nop
.align 32
.L007loop_ssse3:
movdqa 16(%edx),%xmm2
movdqa %xmm3,48(%edx)
leal 128(%ebp),%ebp
movq %mm1,8(%esp)
movl %edi,%ebx
movq %mm2,16(%esp)
leal 128(%edi),%edi
movq %mm3,24(%esp)
cmpl %eax,%edi
movq %mm5,40(%esp)
cmovbl %edi,%ebx
movq %mm6,48(%esp)
movl $4,%ecx
pxor %mm1,%mm2
movq %mm7,56(%esp)
pxor %mm3,%mm3
jmp .L00800_47_ssse3
.align 32
.L00800_47_ssse3:
movdqa %xmm5,%xmm3
movdqa %xmm2,%xmm1
.byte 102,15,58,15,208,8
movdqa %xmm4,(%edx)
.byte 102,15,58,15,220,8
movdqa %xmm2,%xmm4
psrlq $7,%xmm2
paddq %xmm3,%xmm0
movdqa %xmm4,%xmm3
psrlq $1,%xmm4
psllq $56,%xmm3
pxor %xmm4,%xmm2
psrlq $7,%xmm4
pxor %xmm3,%xmm2
psllq $7,%xmm3
pxor %xmm4,%xmm2
movdqa %xmm7,%xmm4
pxor %xmm3,%xmm2
movdqa %xmm7,%xmm3
psrlq $6,%xmm4
paddq %xmm2,%xmm0
movdqa %xmm7,%xmm2
psrlq $19,%xmm3
psllq $3,%xmm2
pxor %xmm3,%xmm4
psrlq $42,%xmm3
pxor %xmm2,%xmm4
psllq $42,%xmm2
pxor %xmm3,%xmm4
movdqa 32(%edx),%xmm3
pxor %xmm2,%xmm4
movdqa (%ebp),%xmm2
movq %mm4,%mm1
paddq %xmm4,%xmm0
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm0,%xmm2
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm2,-128(%edx)
movdqa %xmm6,%xmm4
movdqa %xmm3,%xmm2
.byte 102,15,58,15,217,8
movdqa %xmm5,16(%edx)
.byte 102,15,58,15,229,8
movdqa %xmm3,%xmm5
psrlq $7,%xmm3
paddq %xmm4,%xmm1
movdqa %xmm5,%xmm4
psrlq $1,%xmm5
psllq $56,%xmm4
pxor %xmm5,%xmm3
psrlq $7,%xmm5
pxor %xmm4,%xmm3
psllq $7,%xmm4
pxor %xmm5,%xmm3
movdqa %xmm0,%xmm5
pxor %xmm4,%xmm3
movdqa %xmm0,%xmm4
psrlq $6,%xmm5
paddq %xmm3,%xmm1
movdqa %xmm0,%xmm3
psrlq $19,%xmm4
psllq $3,%xmm3
pxor %xmm4,%xmm5
psrlq $42,%xmm4
pxor %xmm3,%xmm5
psllq $42,%xmm3
pxor %xmm4,%xmm5
movdqa 48(%edx),%xmm4
pxor %xmm3,%xmm5
movdqa 16(%ebp),%xmm3
movq %mm4,%mm1
paddq %xmm5,%xmm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm1,%xmm3
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm3,-112(%edx)
movdqa %xmm7,%xmm5
movdqa %xmm4,%xmm3
.byte 102,15,58,15,226,8
movdqa %xmm6,32(%edx)
.byte 102,15,58,15,238,8
movdqa %xmm4,%xmm6
psrlq $7,%xmm4
paddq %xmm5,%xmm2
movdqa %xmm6,%xmm5
psrlq $1,%xmm6
psllq $56,%xmm5
pxor %xmm6,%xmm4
psrlq $7,%xmm6
pxor %xmm5,%xmm4
psllq $7,%xmm5
pxor %xmm6,%xmm4
movdqa %xmm1,%xmm6
pxor %xmm5,%xmm4
movdqa %xmm1,%xmm5
psrlq $6,%xmm6
paddq %xmm4,%xmm2
movdqa %xmm1,%xmm4
psrlq $19,%xmm5
psllq $3,%xmm4
pxor %xmm5,%xmm6
psrlq $42,%xmm5
pxor %xmm4,%xmm6
psllq $42,%xmm4
pxor %xmm5,%xmm6
movdqa (%edx),%xmm5
pxor %xmm4,%xmm6
movdqa 32(%ebp),%xmm4
movq %mm4,%mm1
paddq %xmm6,%xmm2
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm2,%xmm4
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm4,-96(%edx)
movdqa %xmm0,%xmm6
movdqa %xmm5,%xmm4
.byte 102,15,58,15,235,8
movdqa %xmm7,48(%edx)
.byte 102,15,58,15,247,8
movdqa %xmm5,%xmm7
psrlq $7,%xmm5
paddq %xmm6,%xmm3
movdqa %xmm7,%xmm6
psrlq $1,%xmm7
psllq $56,%xmm6
pxor %xmm7,%xmm5
psrlq $7,%xmm7
pxor %xmm6,%xmm5
psllq $7,%xmm6
pxor %xmm7,%xmm5
movdqa %xmm2,%xmm7
pxor %xmm6,%xmm5
movdqa %xmm2,%xmm6
psrlq $6,%xmm7
paddq %xmm5,%xmm3
movdqa %xmm2,%xmm5
psrlq $19,%xmm6
psllq $3,%xmm5
pxor %xmm6,%xmm7
psrlq $42,%xmm6
pxor %xmm5,%xmm7
psllq $42,%xmm5
pxor %xmm6,%xmm7
movdqa 16(%edx),%xmm6
pxor %xmm5,%xmm7
movdqa 48(%ebp),%xmm5
movq %mm4,%mm1
paddq %xmm7,%xmm3
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm3,%xmm5
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm5,-80(%edx)
movdqa %xmm1,%xmm7
movdqa %xmm6,%xmm5
.byte 102,15,58,15,244,8
movdqa %xmm0,(%edx)
.byte 102,15,58,15,248,8
movdqa %xmm6,%xmm0
psrlq $7,%xmm6
paddq %xmm7,%xmm4
movdqa %xmm0,%xmm7
psrlq $1,%xmm0
psllq $56,%xmm7
pxor %xmm0,%xmm6
psrlq $7,%xmm0
pxor %xmm7,%xmm6
psllq $7,%xmm7
pxor %xmm0,%xmm6
movdqa %xmm3,%xmm0
pxor %xmm7,%xmm6
movdqa %xmm3,%xmm7
psrlq $6,%xmm0
paddq %xmm6,%xmm4
movdqa %xmm3,%xmm6
psrlq $19,%xmm7
psllq $3,%xmm6
pxor %xmm7,%xmm0
psrlq $42,%xmm7
pxor %xmm6,%xmm0
psllq $42,%xmm6
pxor %xmm7,%xmm0
movdqa 32(%edx),%xmm7
pxor %xmm6,%xmm0
movdqa 64(%ebp),%xmm6
movq %mm4,%mm1
paddq %xmm0,%xmm4
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm4,%xmm6
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm6,-64(%edx)
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm6
.byte 102,15,58,15,253,8
movdqa %xmm1,16(%edx)
.byte 102,15,58,15,193,8
movdqa %xmm7,%xmm1
psrlq $7,%xmm7
paddq %xmm0,%xmm5
movdqa %xmm1,%xmm0
psrlq $1,%xmm1
psllq $56,%xmm0
pxor %xmm1,%xmm7
psrlq $7,%xmm1
pxor %xmm0,%xmm7
psllq $7,%xmm0
pxor %xmm1,%xmm7
movdqa %xmm4,%xmm1
pxor %xmm0,%xmm7
movdqa %xmm4,%xmm0
psrlq $6,%xmm1
paddq %xmm7,%xmm5
movdqa %xmm4,%xmm7
psrlq $19,%xmm0
psllq $3,%xmm7
pxor %xmm0,%xmm1
psrlq $42,%xmm0
pxor %xmm7,%xmm1
psllq $42,%xmm7
pxor %xmm0,%xmm1
movdqa 48(%edx),%xmm0
pxor %xmm7,%xmm1
movdqa 80(%ebp),%xmm7
movq %mm4,%mm1
paddq %xmm1,%xmm5
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm5,%xmm7
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm7,-48(%edx)
movdqa %xmm3,%xmm1
movdqa %xmm0,%xmm7
.byte 102,15,58,15,198,8
movdqa %xmm2,32(%edx)
.byte 102,15,58,15,202,8
movdqa %xmm0,%xmm2
psrlq $7,%xmm0
paddq %xmm1,%xmm6
movdqa %xmm2,%xmm1
psrlq $1,%xmm2
psllq $56,%xmm1
pxor %xmm2,%xmm0
psrlq $7,%xmm2
pxor %xmm1,%xmm0
psllq $7,%xmm1
pxor %xmm2,%xmm0
movdqa %xmm5,%xmm2
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm1
psrlq $6,%xmm2
paddq %xmm0,%xmm6
movdqa %xmm5,%xmm0
psrlq $19,%xmm1
psllq $3,%xmm0
pxor %xmm1,%xmm2
psrlq $42,%xmm1
pxor %xmm0,%xmm2
psllq $42,%xmm0
pxor %xmm1,%xmm2
movdqa (%edx),%xmm1
pxor %xmm0,%xmm2
movdqa 96(%ebp),%xmm0
movq %mm4,%mm1
paddq %xmm2,%xmm6
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm6,%xmm0
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm0,-32(%edx)
movdqa %xmm4,%xmm2
movdqa %xmm1,%xmm0
.byte 102,15,58,15,207,8
movdqa %xmm3,48(%edx)
.byte 102,15,58,15,211,8
movdqa %xmm1,%xmm3
psrlq $7,%xmm1
paddq %xmm2,%xmm7
movdqa %xmm3,%xmm2
psrlq $1,%xmm3
psllq $56,%xmm2
pxor %xmm3,%xmm1
psrlq $7,%xmm3
pxor %xmm2,%xmm1
psllq $7,%xmm2
pxor %xmm3,%xmm1
movdqa %xmm6,%xmm3
pxor %xmm2,%xmm1
movdqa %xmm6,%xmm2
psrlq $6,%xmm3
paddq %xmm1,%xmm7
movdqa %xmm6,%xmm1
psrlq $19,%xmm2
psllq $3,%xmm1
pxor %xmm2,%xmm3
psrlq $42,%xmm2
pxor %xmm1,%xmm3
psllq $42,%xmm1
pxor %xmm2,%xmm3
movdqa 16(%edx),%xmm2
pxor %xmm1,%xmm3
movdqa 112(%ebp),%xmm1
movq %mm4,%mm1
paddq %xmm3,%xmm7
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm7,%xmm1
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm1,-16(%edx)
leal 128(%ebp),%ebp
decl %ecx
jnz .L00800_47_ssse3
movdqa (%ebp),%xmm1
leal -640(%ebp),%ebp
movdqu (%ebx),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%ebx),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movq %mm4,%mm1
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%ebx),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movq %mm4,%mm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%ebx),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movq %mm4,%mm1
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%ebx),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movq %mm4,%mm1
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%ebx),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movq %mm4,%mm1
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%ebx),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movq %mm4,%mm1
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%ebx),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movq %mm4,%mm1
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movq %mm4,%mm1
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm2,-16(%edx)
movq 8(%esp),%mm1
paddq %mm3,%mm0
movq 24(%esp),%mm3
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
cmpl %eax,%edi
jb .L007loop_ssse3
movl 76(%edx),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 16
.L002loop_x86:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
movl 28(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
movl 44(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
movl 60(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 64(%edi),%eax
movl 68(%edi),%ebx
movl 72(%edi),%ecx
movl 76(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 80(%edi),%eax
movl 84(%edi),%ebx
movl 88(%edi),%ecx
movl 92(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 96(%edi),%eax
movl 100(%edi),%ebx
movl 104(%edi),%ecx
movl 108(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 112(%edi),%eax
movl 116(%edi),%ebx
movl 120(%edi),%ecx
movl 124(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
addl $128,%edi
subl $72,%esp
movl %edi,204(%esp)
leal 8(%esp),%edi
movl $16,%ecx
.long 2784229001
.align 16
.L00900_15_x86:
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $148,%dl
jne .L00900_15_x86
.align 16
.L01016_79_x86:
movl 312(%esp),%ecx
movl 316(%esp),%edx
movl %ecx,%esi
shrl $1,%ecx
movl %edx,%edi
shrl $1,%edx
movl %ecx,%eax
shll $24,%esi
movl %edx,%ebx
shll $24,%edi
xorl %esi,%ebx
shrl $6,%ecx
xorl %edi,%eax
shrl $6,%edx
xorl %ecx,%eax
shll $7,%esi
xorl %edx,%ebx
shll $1,%edi
xorl %esi,%ebx
shrl $1,%ecx
xorl %edi,%eax
shrl $1,%edx
xorl %ecx,%eax
shll $6,%edi
xorl %edx,%ebx
xorl %edi,%eax
movl %eax,(%esp)
movl %ebx,4(%esp)
movl 208(%esp),%ecx
movl 212(%esp),%edx
movl %ecx,%esi
shrl $6,%ecx
movl %edx,%edi
shrl $6,%edx
movl %ecx,%eax
shll $3,%esi
movl %edx,%ebx
shll $3,%edi
xorl %esi,%eax
shrl $13,%ecx
xorl %edi,%ebx
shrl $13,%edx
xorl %ecx,%eax
shll $10,%esi
xorl %edx,%ebx
shll $10,%edi
xorl %esi,%ebx
shrl $10,%ecx
xorl %edi,%eax
shrl $10,%edx
xorl %ecx,%ebx
shll $13,%edi
xorl %edx,%eax
xorl %edi,%eax
movl 320(%esp),%ecx
movl 324(%esp),%edx
addl (%esp),%eax
adcl 4(%esp),%ebx
movl 248(%esp),%esi
movl 252(%esp),%edi
addl %ecx,%eax
adcl %edx,%ebx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,192(%esp)
movl %ebx,196(%esp)
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $23,%dl
jne .L01016_79_x86
movl 840(%esp),%esi
movl 844(%esp),%edi
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
addl 8(%esp),%eax
adcl 12(%esp),%ebx
movl %eax,(%esi)
movl %ebx,4(%esi)
addl 16(%esp),%ecx
adcl 20(%esp),%edx
movl %ecx,8(%esi)
movl %edx,12(%esi)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
addl 24(%esp),%eax
adcl 28(%esp),%ebx
movl %eax,16(%esi)
movl %ebx,20(%esi)
addl 32(%esp),%ecx
adcl 36(%esp),%edx
movl %ecx,24(%esi)
movl %edx,28(%esi)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
addl 40(%esp),%eax
adcl 44(%esp),%ebx
movl %eax,32(%esi)
movl %ebx,36(%esi)
addl 48(%esp),%ecx
adcl 52(%esp),%edx
movl %ecx,40(%esi)
movl %edx,44(%esi)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
addl 56(%esp),%eax
adcl 60(%esp),%ebx
movl %eax,48(%esi)
movl %ebx,52(%esi)
addl 64(%esp),%ecx
adcl 68(%esp),%edx
movl %ecx,56(%esi)
movl %edx,60(%esi)
addl $840,%esp
subl $640,%ebp
cmpl 8(%esp),%edi
jb .L002loop_x86
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 64
.L001K512:
.long 3609767458,1116352408
.long 602891725,1899447441
.long 3964484399,3049323471
.long 2173295548,3921009573
.long 4081628472,961987163
.long 3053834265,1508970993
.long 2937671579,2453635748
.long 3664609560,2870763221
.long 2734883394,3624381080
.long 1164996542,310598401
.long 1323610764,607225278
.long 3590304994,1426881987
.long 4068182383,1925078388
.long 991336113,2162078206
.long 633803317,2614888103
.long 3479774868,3248222580
.long 2666613458,3835390401
.long 944711139,4022224774
.long 2341262773,264347078
.long 2007800933,604807628
.long 1495990901,770255983
.long 1856431235,1249150122
.long 3175218132,1555081692
.long 2198950837,1996064986
.long 3999719339,2554220882
.long 766784016,2821834349
.long 2566594879,2952996808
.long 3203337956,3210313671
.long 1034457026,3336571891
.long 2466948901,3584528711
.long 3758326383,113926993
.long 168717936,338241895
.long 1188179964,666307205
.long 1546045734,773529912
.long 1522805485,1294757372
.long 2643833823,1396182291
.long 2343527390,1695183700
.long 1014477480,1986661051
.long 1206759142,2177026350
.long 344077627,2456956037
.long 1290863460,2730485921
.long 3158454273,2820302411
.long 3505952657,3259730800
.long 106217008,3345764771
.long 3606008344,3516065817
.long 1432725776,3600352804
.long 1467031594,4094571909
.long 851169720,275423344
.long 3100823752,430227734
.long 1363258195,506948616
.long 3750685593,659060556
.long 3785050280,883997877
.long 3318307427,958139571
.long 3812723403,1322822218
.long 2003034995,1537002063
.long 3602036899,1747873779
.long 1575990012,1955562222
.long 1125592928,2024104815
.long 2716904306,2227730452
.long 442776044,2361852424
.long 593698344,2428436474
.long 3733110249,2756734187
.long 2999351573,3204031479
.long 3815920427,3329325298
.long 3928383900,3391569614
.long 566280711,3515267271
.long 3454069534,3940187606
.long 4000239992,4118630271
.long 1914138554,116418474
.long 2731055270,174292421
.long 3203993006,289380356
.long 320620315,460393269
.long 587496836,685471733
.long 1086792851,852142971
.long 365543100,1017036298
.long 2618297676,1126000580
.long 3409855158,1288033470
.long 4234509866,1501505948
.long 987167468,1607167915
.long 1246189591,1816402316
.long 67438087,66051
.long 202182159,134810123
.size sha512_block_data_order,.-.L_sha512_block_data_order_begin
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- extraction artifact: dataset row delimiter ----
// ---- next file: repo weix2025/toy, 6,610 bytes,
// ---- deps/boringssl/linux-x86/crypto/fipsmodule/ghash-x86-linux.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// void gcm_init_clmul(u128 Htable[], const uint64_t H[2])
//   4(%esp) = Htable : output table consumed by gcm_gmult/gcm_ghash_clmul
//   8(%esp) = H      : raw GHASH subkey (one block-cipher output)
// Builds the PCLMULQDQ-path key schedule: the adjusted H, H squared, and
// the xor-folded halves used for Karatsuba-style carry-less multiply.
// NOTE(review): generated by perlasm (ghash-x86.pl); do not hand-edit.
// The ".byte 102,15,58,68,..." lines are raw encodings of pclmulqdq and
// ".byte 102,15,58,15,..." of palignr, emitted as bytes for old assemblers.
//-----------------------------------------------------------------------
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.L_gcm_init_clmul_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
// position-independent address of the .Lbswap constant block
call .L000pic
.L000pic:
popl %ecx
leal .Lbswap-.L000pic(%ecx),%ecx
// load H and convert it into the form the multiply loop expects:
// shift left by one with conditional reduction by the constant at 16(%ecx)
movdqu (%eax),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand 16(%ecx),%xmm5
pxor %xmm5,%xmm2
// carry-less square of H (Karatsuba: three pclmulqdq), then the usual
// shift-based reduction modulo the GHASH polynomial
movdqa %xmm2,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
// store: Htable[0] = H, Htable[1] = H^2, Htable[2] = folded halves
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,(%edx)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%edx)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%edx)
ret
.size gcm_init_clmul,.-.L_gcm_init_clmul_begin
//-----------------------------------------------------------------------
// void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[])
//   4(%esp) = Xi     : hash state, multiplied in place by H
//   8(%esp) = Htable : table produced by gcm_init_clmul
// One GHASH multiplication Xi = Xi * H using pclmulqdq.
// NOTE(review): generated by perlasm (ghash-x86.pl); do not hand-edit.
// ".byte 102,15,56,0,..." encodes pshufb (byte swap via the .Lbswap mask),
// ".byte 102,15,58,68,..." encodes pclmulqdq.
//-----------------------------------------------------------------------
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,@function
.align 16
gcm_gmult_clmul:
.L_gcm_gmult_clmul_begin:
movl 4(%esp),%eax
movl 8(%esp),%edx
call .L001pic
.L001pic:
popl %ecx
leal .Lbswap-.L001pic(%ecx),%ecx
// load Xi, byte-swap it, load H and the folded halves from the table
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movups (%edx),%xmm2
.byte 102,15,56,0,197
movups 32(%edx),%xmm4
// carry-less multiply Xi * H (three pclmulqdq, Karatsuba combine) ...
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
// ... then reduce the 256-bit product modulo the GHASH polynomial
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
// swap back to memory order and store the updated Xi
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
ret
.size gcm_gmult_clmul,.-.L_gcm_gmult_clmul_begin
//-----------------------------------------------------------------------
// void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[],
//                      const uint8_t *inp, size_t len)
// After the four register pushes:
//   20(%esp) = Xi     : hash state, updated in place
//   24(%esp) = Htable : table produced by gcm_init_clmul
//   28(%esp) = inp    : input bytes to hash
//   32(%esp) = len    : length in bytes (handled in 16-byte blocks)
// Absorbs len bytes into Xi.  The main loop processes two blocks per
// iteration (using H and H^2 from the table); odd/even tails handle the
// leftover block counts.
// NOTE(review): generated by perlasm (ghash-x86.pl); do not hand-edit.
// ".byte 102,15,56,0,..." = pshufb, ".byte 102,15,58,68,..." = pclmulqdq.
//-----------------------------------------------------------------------
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 16
gcm_ghash_clmul:
.L_gcm_ghash_clmul_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 28(%esp),%esi
movl 32(%esp),%ebx
call .L002pic
.L002pic:
popl %ecx
leal .Lbswap-.L002pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movdqu (%edx),%xmm2
.byte 102,15,56,0,197
subl $16,%ebx
jz .L003odd_tail
// at least two blocks: prime the two-block pipeline
movdqu (%esi),%xmm3
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221
.byte 102,15,56,0,245
movdqu 32(%edx),%xmm5
pxor %xmm3,%xmm0
pshufd $78,%xmm6,%xmm3
movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3
leal 32(%esi),%esi
.byte 102,15,58,68,242,0
.byte 102,15,58,68,250,17
.byte 102,15,58,68,221,0
movups 16(%edx),%xmm2
nop
subl $32,%ebx
jbe .L004even_tail
jmp .L005mod_loop
.align 32
// main loop: two 16-byte blocks per iteration, multiply interleaved
// with the reduction of the previous product
.L005mod_loop:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
nop
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movups (%edx),%xmm2
xorps %xmm6,%xmm0
movdqa (%ecx),%xmm5
xorps %xmm7,%xmm1
movdqu (%esi),%xmm7
pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
pxor %xmm1,%xmm3
.byte 102,15,56,0,253
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
.byte 102,15,56,0,245
pxor %xmm7,%xmm1
movdqa %xmm6,%xmm7
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0
movups 32(%edx),%xmm5
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
pshufd $78,%xmm7,%xmm3
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
.byte 102,15,58,68,250,17
movups 16(%edx),%xmm2
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,58,68,221,0
leal 32(%esi),%esi
subl $32,%ebx
ja .L005mod_loop
// exactly two blocks left in the pipeline: final multiply + reduce
.L004even_tail:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movdqa (%ecx),%xmm5
xorps %xmm6,%xmm0
xorps %xmm7,%xmm1
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz .L006done
movups (%edx),%xmm2
// single trailing block: one multiply + reduce
.L003odd_tail:
movdqu (%esi),%xmm3
.byte 102,15,56,0,221
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.L006done:
// byte-swap back and store the updated Xi
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin
.align 64
// Constant block referenced PC-relatively by the clmul routines above.
.Lbswap:
// 16-byte pshufb mask that reverses byte order (GHASH is big-endian).
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
// Reduction constant; loaded as 16(%ecx) in gcm_init_clmul.
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
// ASCII credit string: "GHASH for x86, CRYPTOGAMS by <appro@openssl.org>"
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
.byte 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- extraction artifact: dataset row delimiter ----
// ---- next file: repo weix2025/toy, 15,486 bytes,
// ---- deps/boringssl/linux-x86/crypto/fipsmodule/bn-586-linux.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap,
//                           int num, BN_ULONG w)
//   rp[i] += ap[i] * w for i in [0, num); returns the final carry word.
// Run-time dispatch on OPENSSL_ia32cap_P bit 26 (SSE2): the SSE2 path
// accumulates 64-bit products with pmuludq/paddq; the fallback uses the
// classic mull/adcl carry chain.
// NOTE(review): generated by perlasm (bn-586.pl); do not hand-edit.
// The adcl carry chains depend on exact instruction order.
//-----------------------------------------------------------------------
.globl bn_mul_add_words
.hidden bn_mul_add_words
.type bn_mul_add_words,@function
.align 16
bn_mul_add_words:
.L_bn_mul_add_words_begin:
// PIC access to the capability vector
call .L000PIC_me_up
.L000PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L000PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L001maw_non_sse2
// SSE2 path: mm0 = w, mm1 = running carry
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
jmp .L002maw_sse2_entry
.align 16
// SSE2 unrolled: 8 words per iteration
.L003maw_sse2_unrolled:
movd (%eax),%mm3
paddq %mm3,%mm1
movd (%edx),%mm2
pmuludq %mm0,%mm2
movd 4(%edx),%mm4
pmuludq %mm0,%mm4
movd 8(%edx),%mm6
pmuludq %mm0,%mm6
movd 12(%edx),%mm7
pmuludq %mm0,%mm7
paddq %mm2,%mm1
movd 4(%eax),%mm3
paddq %mm4,%mm3
movd 8(%eax),%mm5
paddq %mm6,%mm5
movd 12(%eax),%mm4
paddq %mm4,%mm7
movd %mm1,(%eax)
movd 16(%edx),%mm2
pmuludq %mm0,%mm2
psrlq $32,%mm1
movd 20(%edx),%mm4
pmuludq %mm0,%mm4
paddq %mm3,%mm1
movd 24(%edx),%mm6
pmuludq %mm0,%mm6
movd %mm1,4(%eax)
psrlq $32,%mm1
movd 28(%edx),%mm3
addl $32,%edx
pmuludq %mm0,%mm3
paddq %mm5,%mm1
movd 16(%eax),%mm5
paddq %mm5,%mm2
movd %mm1,8(%eax)
psrlq $32,%mm1
paddq %mm7,%mm1
movd 20(%eax),%mm5
paddq %mm5,%mm4
movd %mm1,12(%eax)
psrlq $32,%mm1
paddq %mm2,%mm1
movd 24(%eax),%mm5
paddq %mm5,%mm6
movd %mm1,16(%eax)
psrlq $32,%mm1
paddq %mm4,%mm1
movd 28(%eax),%mm5
paddq %mm5,%mm3
movd %mm1,20(%eax)
psrlq $32,%mm1
paddq %mm6,%mm1
movd %mm1,24(%eax)
psrlq $32,%mm1
paddq %mm3,%mm1
movd %mm1,28(%eax)
leal 32(%eax),%eax
psrlq $32,%mm1
subl $8,%ecx
jz .L004maw_sse2_exit
.L002maw_sse2_entry:
testl $4294967288,%ecx
jnz .L003maw_sse2_unrolled
.align 4
// SSE2 tail: one word per iteration
.L005maw_sse2_loop:
movd (%edx),%mm2
movd (%eax),%mm3
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm3,%mm1
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz .L005maw_sse2_loop
.L004maw_sse2_exit:
// return the final carry; emms restores the x87 state
movd %mm1,%eax
emms
ret
.align 16
// scalar fallback: esi = carry, edi = rp, ebx = ap, ebp = w
.L001maw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 28(%esp),%ecx
movl 24(%esp),%ebx
andl $4294967288,%ecx
movl 32(%esp),%ebp
pushl %ecx
jz .L006maw_finish
.align 16
// scalar main loop: 8 words per iteration
.L007maw_loop:
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
movl 28(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 28(%edi),%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
subl $8,%ecx
leal 32(%ebx),%ebx
leal 32(%edi),%edi
jnz .L007maw_loop
.L006maw_finish:
// handle the remaining num % 8 words
movl 32(%esp),%ecx
andl $7,%ecx
jnz .L008maw_finish2
jmp .L009maw_end
.L008maw_finish2:
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,(%edi)
movl %edx,%esi
jz .L009maw_end
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,4(%edi)
movl %edx,%esi
jz .L009maw_end
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,8(%edi)
movl %edx,%esi
jz .L009maw_end
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,12(%edi)
movl %edx,%esi
jz .L009maw_end
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,16(%edi)
movl %edx,%esi
jz .L009maw_end
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,20(%edi)
movl %edx,%esi
jz .L009maw_end
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
.L009maw_end:
// return the final carry in eax
movl %esi,%eax
popl %ecx
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_add_words,.-.L_bn_mul_add_words_begin
//-----------------------------------------------------------------------
// BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap,
//                       int num, BN_ULONG w)
//   rp[i] = low32(ap[i] * w + carry) for i in [0, num); returns the
//   final carry word.  (Unlike bn_mul_add_words, rp is overwritten,
//   not accumulated into.)
// Run-time dispatch on OPENSSL_ia32cap_P bit 26 (SSE2).
// NOTE(review): generated by perlasm (bn-586.pl); do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_mul_words
.hidden bn_mul_words
.type bn_mul_words,@function
.align 16
bn_mul_words:
.L_bn_mul_words_begin:
call .L010PIC_me_up
.L010PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L010PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L011mw_non_sse2
// SSE2 path: mm0 = w, mm1 = running carry, one word per iteration
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
.align 16
.L012mw_sse2_loop:
movd (%edx),%mm2
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz .L012mw_sse2_loop
movd %mm1,%eax
emms
ret
.align 16
// scalar fallback: esi = carry, edi = rp, ebx = ap, ecx = w, ebp = count
.L011mw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ebp
movl 32(%esp),%ecx
andl $4294967288,%ebp
jz .L013mw_finish
// scalar main loop: 8 words per iteration
.L014mw_loop:
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
movl 28(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
addl $32,%ebx
addl $32,%edi
subl $8,%ebp
jz .L013mw_finish
jmp .L014mw_loop
.L013mw_finish:
// handle the remaining num % 8 words
movl 28(%esp),%ebp
andl $7,%ebp
jnz .L015mw_finish2
jmp .L016mw_end
.L015mw_finish2:
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
.L016mw_end:
// return the final carry in eax
movl %esi,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_words,.-.L_bn_mul_words_begin
//-----------------------------------------------------------------------
// void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
//   For i in [0, n): r[2*i] = low32(a[i]^2), r[2*i+1] = high32(a[i]^2).
//   r has 2*n words; no cross-word carries are involved.
// Run-time dispatch on OPENSSL_ia32cap_P bit 26 (SSE2): pmuludq squares
// one word per iteration; the fallback uses mull %eax.
// NOTE(review): generated by perlasm (bn-586.pl); do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_sqr_words
.hidden bn_sqr_words
.type bn_sqr_words,@function
.align 16
bn_sqr_words:
.L_bn_sqr_words_begin:
call .L017PIC_me_up
.L017PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L017PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L018sqr_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
.align 16
// SSE2 path: one 64-bit square stored per iteration
.L019sqr_sse2_loop:
movd (%edx),%mm0
pmuludq %mm0,%mm0
leal 4(%edx),%edx
movq %mm0,(%eax)
subl $1,%ecx
leal 8(%eax),%eax
jnz .L019sqr_sse2_loop
emms
ret
.align 16
// scalar fallback: esi = r, edi = a, ebx = count
.L018sqr_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%ebx
andl $4294967288,%ebx
jz .L020sw_finish
// scalar main loop: 8 words per iteration
.L021sw_loop:
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
movl %edx,4(%esi)
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
movl %edx,12(%esi)
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
movl %edx,20(%esi)
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
movl %edx,28(%esi)
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
movl %edx,36(%esi)
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
movl %edx,44(%esi)
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
movl 28(%edi),%eax
mull %eax
movl %eax,56(%esi)
movl %edx,60(%esi)
addl $32,%edi
addl $64,%esi
subl $8,%ebx
jnz .L021sw_loop
.L020sw_finish:
// handle the remaining n % 8 words
movl 28(%esp),%ebx
andl $7,%ebx
jz .L022sw_end
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
decl %ebx
movl %edx,4(%esi)
jz .L022sw_end
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
decl %ebx
movl %edx,12(%esi)
jz .L022sw_end
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
decl %ebx
movl %edx,20(%esi)
jz .L022sw_end
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
decl %ebx
movl %edx,28(%esi)
jz .L022sw_end
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
decl %ebx
movl %edx,36(%esi)
jz .L022sw_end
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
decl %ebx
movl %edx,44(%esi)
jz .L022sw_end
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
.L022sw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_sqr_words,.-.L_bn_sqr_words_begin
//-----------------------------------------------------------------------
// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
//   Returns the 32-bit quotient of the 64-bit value h:l divided by d,
//   via a single divl (edx:eax / ecx -> eax).
// NOTE(review): divl raises #DE if d == 0 or if the quotient does not
// fit in 32 bits; callers are expected to guarantee h < d.
//-----------------------------------------------------------------------
.globl bn_div_words
.hidden bn_div_words
.type bn_div_words,@function
.align 16
bn_div_words:
.L_bn_div_words_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
movl 12(%esp),%ecx
divl %ecx
ret
.size bn_div_words,.-.L_bn_div_words_begin
//-----------------------------------------------------------------------
// BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap,
//                       const BN_ULONG *bp, int num)
//   rp[i] = ap[i] + bp[i] + carry for i in [0, num); returns the final
//   carry (0 or 1).  The carry is held in eax between words: each step
//   does ecx = a[i] + carry, re-materializes the carry with adcl, then
//   ecx += b[i] with a second carry pickup.
// Register roles: ebx = rp, esi = ap, edi = bp, ebp = count, eax = carry.
// NOTE(review): generated by perlasm (bn-586.pl); do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_add_words
.hidden bn_add_words
.type bn_add_words,@function
.align 16
bn_add_words:
.L_bn_add_words_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebx
movl 24(%esp),%esi
movl 28(%esp),%edi
movl 32(%esp),%ebp
xorl %eax,%eax
andl $4294967288,%ebp
jz .L023aw_finish
// main loop: 8 words per iteration
.L024aw_loop:
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
movl 28(%esi),%ecx
movl 28(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz .L024aw_loop
.L023aw_finish:
// handle the remaining num % 8 words
movl 32(%esp),%ebp
andl $7,%ebp
jz .L025aw_end
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz .L025aw_end
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz .L025aw_end
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz .L025aw_end
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz .L025aw_end
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz .L025aw_end
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz .L025aw_end
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
.L025aw_end:
// eax already holds the final carry
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_add_words,.-.L_bn_add_words_begin
//-----------------------------------------------------------------------
// BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a,
//                       const BN_ULONG *b, size_t n)
// i386 cdecl. Computes r[i] = a[i] - b[i] - borrow for i in [0, n) and
// returns the final borrow (0 or 1) in %eax. Same shape as
// bn_add_words: 8-word unrolled main loop plus an n % 8 tail.
// Generated by perlasm — comments only; do not hand-edit the code.
//-----------------------------------------------------------------------
.globl bn_sub_words
.hidden bn_sub_words
.type bn_sub_words,@function
.align 16
bn_sub_words:
.L_bn_sub_words_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
// After the four pushes the cdecl arguments sit at 20..32(%esp).
movl 20(%esp),%ebx // %ebx = r (result array)
movl 24(%esp),%esi // %esi = a
movl 28(%esp),%edi // %edi = b
movl 32(%esp),%ebp // %ebp = n (word count)
xorl %eax,%eax // %eax = running borrow, starts at 0
andl $4294967288,%ebp // n & ~7: words done by the unrolled loop
jz .L026aw_finish
.L027aw_loop:
// Each group computes r[i] = a[i] - b[i] - borrow-in; CF after each
// subl is latched into %eax by "movl $0,%eax / adcl %eax,%eax"
// (movl does not touch the flags).
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
movl 28(%esi),%ecx
movl 28(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
// Advance all three pointers by 8 words and loop.
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz .L027aw_loop
.L026aw_finish:
// Tail: handle the remaining n % 8 words one at a time.
movl 32(%esp),%ebp
andl $7,%ebp
jz .L028aw_end
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz .L028aw_end
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz .L028aw_end
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz .L028aw_end
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz .L028aw_end
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz .L028aw_end
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz .L028aw_end
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
.L028aw_end:
// %eax holds the final borrow; restore callee-saved registers.
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_sub_words,.-.L_bn_sub_words_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 17,356
|
deps/boringssl/linux-x86/crypto/fipsmodule/co-586-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
// i386 cdecl. Comba (column-wise) 8x8-word multiply: r[0..15] = a * b.
// %esi = a, %edi = b; r is reloaded into %eax from 20(%esp) just before
// each column store. The registers %ebx/%ecx/%ebp rotate through the
// roles c0/c1/c2 (column low word, column high word, overflow) of the
// running column accumulator; column k sums a[i]*b[j] for all i+j == k.
// Generated by perlasm (co-586.pl) — comments only; do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_mul_comba8
.hidden bn_mul_comba8
.type bn_mul_comba8,@function
.align 16
bn_mul_comba8:
.L_bn_mul_comba8_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax) // r[0] (column 0 done)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax) // r[1] (column 1 done)
movl 8(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax) // r[2] (column 2 done)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax) // r[3] (column 3 done)
movl 16(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax) // r[4] (column 4 done)
movl 20(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax) // r[5] (column 5 done)
movl 24(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,24(%eax) // r[6] (column 6 done)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
movl %ecx,28(%eax) // r[7] (column 7 done; columns now shrink)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
movl %ebp,32(%eax) // r[8] (column 8 done)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
movl %ebx,36(%eax) // r[9] (column 9 done)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
movl %ecx,40(%eax) // r[10] (column 10 done)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
movl %ebp,44(%eax) // r[11] (column 11 done)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
movl %ebx,48(%eax) // r[12] (column 12 done)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
movl %ecx,52(%eax) // r[13] (column 13 done)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%eax) // r[14] = low of a[7]*b[7] + carries
movl %ebx,60(%eax) // r[15] = final high word
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_mul_comba8,.-.L_bn_mul_comba8_begin
//-----------------------------------------------------------------------
// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
// i386 cdecl. Comba (column-wise) 4x4-word multiply: r[0..7] = a * b.
// Same scheme as bn_mul_comba8: %esi = a, %edi = b, r reloaded into
// %eax from 20(%esp) before each column store; %ebx/%ecx/%ebp rotate
// through the c0/c1/c2 roles of the column accumulator.
// Generated by perlasm (co-586.pl) — comments only; do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_mul_comba4
.hidden bn_mul_comba4
.type bn_mul_comba4,@function
.align 16
bn_mul_comba4:
.L_bn_mul_comba4_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax) // r[0] (column 0 done)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax) // r[1] (column 1 done)
movl 8(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax) // r[2] (column 2 done)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax) // r[3] (column 3 done; columns now shrink)
movl 12(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax) // r[4] (column 4 done)
movl 12(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax) // r[5] (column 5 done)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%eax) // r[6] = low of a[3]*b[3] + carries
movl %ecx,28(%eax) // r[7] = final high word
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_mul_comba4,.-.L_bn_mul_comba4_begin
//-----------------------------------------------------------------------
// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
// i386 cdecl. Comba squaring: r[0..15] = a[0..7]^2. %edi = r, %esi = a.
// Cross terms a[i]*a[j] (i != j) appear twice in a square, so each
// product is doubled in place via "addl %eax,%eax / adcl %edx,%edx"
// (with the overflow bit caught by the following adcl $0) before being
// folded into the column accumulator; diagonal terms a[i]^2 ("mull
// %eax") are added once. %ebx/%ecx/%ebp rotate through the c0/c1/c2
// accumulator roles, as in bn_mul_comba8.
// Generated by perlasm (co-586.pl) — comments only; do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_sqr_comba8
.hidden bn_sqr_comba8
.type bn_sqr_comba8,@function
.align 16
bn_sqr_comba8:
.L_bn_sqr_comba8_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi) // r[0] = low(a[0]^2)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi) // r[1] (column 1: 2*a[0]*a[1])
movl (%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi) // r[2] (column 2: 2*a[0]*a[2] + a[1]^2)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi) // r[3] (column 3 done)
movl (%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 12(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl (%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi) // r[4] (column 4 done)
movl 20(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi) // r[5] (column 5 done)
movl (%esi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,24(%edi) // r[6] (column 6 done)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%eax
adcl $0,%ebx
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,28(%edi) // r[7] (column 7 done; columns now shrink)
movl 4(%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 20(%esi),%eax
adcl $0,%ecx
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl 8(%esi),%edx
adcl $0,%ecx
movl %ebp,32(%edi) // r[8] (column 8 done)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 16(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 28(%esi),%eax
adcl $0,%ebp
movl %ebx,36(%edi) // r[9] (column 9 done)
movl 12(%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 16(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%edx
adcl $0,%ebx
movl %ecx,40(%edi) // r[10] (column 10 done)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 20(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 28(%esi),%eax
adcl $0,%ecx
movl %ebp,44(%edi) // r[11] (column 11 done)
movl 20(%esi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%edx
adcl $0,%ebp
movl %ebx,48(%edi) // r[12] (column 12: 2*a[5]*a[7] + a[6]^2)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,52(%edi) // r[13] (column 13: 2*a[6]*a[7])
xorl %ecx,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%edi) // r[14] = low of a[7]^2 + carries
movl %ebx,60(%edi) // r[15] = final high word
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin
//-----------------------------------------------------------------------
// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
// i386 cdecl. Comba squaring: r[0..7] = a[0..3]^2. %edi = r, %esi = a.
// Same scheme as bn_sqr_comba8: cross terms a[i]*a[j] (i != j) are
// doubled in place ("addl %eax,%eax / adcl %edx,%edx", overflow caught
// by the following adcl $0) before accumulation; diagonal terms a[i]^2
// ("mull %eax") are added once. %ebx/%ecx/%ebp rotate through the
// c0/c1/c2 column-accumulator roles.
// Generated by perlasm (co-586.pl) — comments only; do not hand-edit.
//-----------------------------------------------------------------------
.globl bn_sqr_comba4
.hidden bn_sqr_comba4
.type bn_sqr_comba4,@function
.align 16
bn_sqr_comba4:
.L_bn_sqr_comba4_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi) // r[0] = low(a[0]^2)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi) // r[1] (column 1: 2*a[0]*a[1])
movl (%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi) // r[2] (column 2: 2*a[0]*a[2] + a[1]^2)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi) // r[3] (column 3: 2*a[0]*a[3] + 2*a[1]*a[2])
movl 4(%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi) // r[4] (column 4: 2*a[1]*a[3] + a[2]^2)
movl 12(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi) // r[5] (column 5: 2*a[2]*a[3])
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%edi) // r[6] = low of a[3]^2 + carries
movl %ecx,28(%edi) // r[7] = final high word
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 19,414
|
deps/boringssl/linux-x86/crypto/chacha/chacha-x86-linux.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
//-----------------------------------------------------------------------
// void ChaCha20_ctr32(uint8_t *out, const uint8_t *inp, size_t len,
//                     const uint32_t key[8], const uint32_t counter[4])
// i386 cdecl. XORs `len` bytes of `inp` with the ChaCha20 keystream
// into `out`. If OPENSSL_ia32cap_P advertises SSSE3 (and the
// accompanying feature bit tested below), execution tail-jumps into
// ChaCha20_ssse3's .Lssse3_shortcut; otherwise the scalar x86 path
// below runs. After the four pushes the args sit at 20..36(%esp):
// out, inp, len, key, counter.
// Generated by perlasm (chacha-x86.pl) — comments only; do not
// hand-edit the instruction stream.
//-----------------------------------------------------------------------
.globl ChaCha20_ctr32
.hidden ChaCha20_ctr32
.type ChaCha20_ctr32,@function
.align 16
ChaCha20_ctr32:
.L_ChaCha20_ctr32_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
cmpl 28(%esp),%eax // len == 0? nothing to do
je .L000no_data
// PIC idiom: call/pop yields the address of .Lpic_point in %eax,
// from which OPENSSL_ia32cap_P is reached PC-relatively.
call .Lpic_point
.Lpic_point:
popl %eax
leal OPENSSL_ia32cap_P-.Lpic_point(%eax),%ebp
testl $16777216,(%ebp)
jz .L001x86
testl $512,4(%ebp) // SSSE3 feature bit
jz .L001x86
jmp .Lssse3_shortcut // defined in ChaCha20_ssse3
.L001x86:
// Scalar path. Copy the key to 80..108(%esp) and the counter/nonce
// to 112..124(%esp); counter[0] is pre-decremented because the
// per-block entry code below re-increments it.
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp .L002entry
.align 16
.L003outer_loop:
// Spill the walking input/output pointers and remaining length
// before re-entering the per-block setup.
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
.L002entry:
// Build the 16-word ChaCha state at 0..60(%esp). The first four
// words are the "expand 32-byte k" sigma constants.
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx // bump the 32-bit block counter
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx // 10 double rounds = 20 rounds
jmp .L004loop
.align 16
.L004loop:
// One double round (column round + diagonal round). Live state
// words rotate between registers and their stack slots; the round
// counter is spilled at 128(%esp) while %ebx serves as state.
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx // reload the round counter
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz .L004loop
// Rounds done: add the original state back in, then XOR with input.
movl 160(%esp),%ebx // remaining length
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb .L005tail // partial final block
// Full 64-byte block: finish the feed-forward for the words still in
// registers, XOR them with the input, and store to the output.
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx // advance input pointer
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax // advance output pointer
subl $64,%ecx
jnz .L003outer_loop
jmp .L006done
.L005tail:
// Partial final block: materialize the whole 64-byte keystream block
// at 0..60(%esp), then XOR it with the input byte by byte.
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp // %ebp = input pointer
movl %ecx,40(%esp)
movl 152(%esp),%ecx // %ecx = output pointer
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
.L007tail_loop:
// %esi = byte index, %ebx = bytes remaining.
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz .L007tail_loop
.L006done:
addl $132,%esp
.L000no_data:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32,.-.L_ChaCha20_ctr32_begin
.globl ChaCha20_ssse3
.hidden ChaCha20_ssse3
.type ChaCha20_ssse3,@function
.align 16
ChaCha20_ssse3:
.L_ChaCha20_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
.Lssse3_shortcut:
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal .Lssse3_data-.Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb .L0081x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp .L009outer_loop
.align 16
.L009outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 16
.L010loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz .L010loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc .L009outer_loop
addl $256,%ecx
jz .L011done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
.L0081x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp .L012loop1x
.align 16
.L013outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp .L012loop1x
.align 16
.L012loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz .L012loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb .L014tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz .L013outer1x
jmp .L011done
.L014tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
.L015tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz .L015tail_loop
.L011done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ssse3,.-.L_ChaCha20_ssse3_begin
.align 64
.Lssse3_data:
// Offset 0: pshufb mask rotating each 32-bit lane left by 16 bits
// (used via "pshufb (%eax)" in the quarter-round code above).
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
// Offset 16: pshufb mask rotating each 32-bit lane left by 8 bits
// (used via "pshufb 16(%eax)").
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
// Offset 32: ChaCha20 sigma constant "expand 32-byte k" as four
// little-endian 32-bit words (0x61707865, ...).
.long 1634760805,857760878,2036477234,1797285236
// Offset 48: per-lane counter offsets 0..3 for the 4-way parallel path.
.long 0,1,2,3
// Offset 64: counter increment of 4 per lane after each 4-block batch.
.long 4,4,4,4
// Offset 80: increment of 1 for the single-block counter (see .L013outer1x).
.long 1,0,0,0
// Offset 96: increment of 4 added to the saved counter (see "paddd 96(%eax)").
.long 4,0,0,0
// Offset 112: mask keeping the nonce words while replacing the counter word
// (see "pand 112(%eax)" followed by "por").
.long 0,-1,-1,-1
.align 64
// ASCII credit string: "ChaCha20 for x86, CRYPTOGAMS by <appro@openssl.org>\0"
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
weix2025/toy
| 10,917
|
deps/boringssl/win-aarch64/crypto/test/trampoline-armv8-win.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
// abi_test_trampoline loads callee-saved registers from |state|, calls |func|
// with |argv|, then saves the callee-saved registers into |state|. It returns
// the result of |func|. The |unwind| argument is unused.
// uint64_t abi_test_trampoline(void (*func)(...), CallerState *state,
// const uint64_t *argv, size_t argc,
// uint64_t unwind);
.globl abi_test_trampoline
.align 4
abi_test_trampoline:
Labi_test_trampoline_begin:
	AARCH64_SIGN_LINK_REGISTER
	// Stack layout (low to high addresses)
	//   x29,x30 (16 bytes)
	//   d8-d15 (64 bytes)
	//   x19-x28 (80 bytes)
	//   x1 (8 bytes)
	//   padding (8 bytes)
	stp	x29, x30, [sp, #-176]!
	mov	x29, sp

	// Saved callee-saved registers and |state|.
	stp	d8, d9, [sp, #16]
	stp	d10, d11, [sp, #32]
	stp	d12, d13, [sp, #48]
	stp	d14, d15, [sp, #64]
	stp	x19, x20, [sp, #80]
	stp	x21, x22, [sp, #96]
	stp	x23, x24, [sp, #112]
	stp	x25, x26, [sp, #128]
	stp	x27, x28, [sp, #144]
	// |state| (x1) is kept on the stack so it can be reloaded after the
	// call, once x0-x7 have been clobbered by arguments and return value.
	str	x1, [sp, #160]

	// Load registers from |state|, with the exception of x29. x29 is the
	// frame pointer and also callee-saved, but AAPCS64 allows platforms to
	// mandate that x29 always point to a frame. iOS64 does so, which means
	// we cannot fill x29 with entropy without violating ABI rules
	// ourselves. x29 is tested separately below.
	ldp	d8, d9, [x1], #16
	ldp	d10, d11, [x1], #16
	ldp	d12, d13, [x1], #16
	ldp	d14, d15, [x1], #16
	ldp	x19, x20, [x1], #16
	ldp	x21, x22, [x1], #16
	ldp	x23, x24, [x1], #16
	ldp	x25, x26, [x1], #16
	ldp	x27, x28, [x1], #16

	// Move parameters into temporary registers.
	// (x0-x7 are about to be overwritten with |func|'s arguments, so
	// |func|, |argv| and |argc| are parked in caller-saved temporaries.)
	mov	x9, x0
	mov	x10, x2
	mov	x11, x3

	// Load parameters into registers.
	// Unrolled: up to eight integer arguments are popped from |argv|
	// into x0-x7; any arguments beyond eight are not loaded here.
	cbz	x11, Largs_done
	ldr	x0, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x1, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x2, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x3, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x4, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x5, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x6, [x10], #8
	subs	x11, x11, #1
	b.eq	Largs_done
	ldr	x7, [x10], #8

Largs_done:
	// Call |func|; its return value stays in x0 through to our own ret.
	blr	x9

	// Reload |state| and store registers.
	ldr	x1, [sp, #160]
	stp	d8, d9, [x1], #16
	stp	d10, d11, [x1], #16
	stp	d12, d13, [x1], #16
	stp	d14, d15, [x1], #16
	stp	x19, x20, [x1], #16
	stp	x21, x22, [x1], #16
	stp	x23, x24, [x1], #16
	stp	x25, x26, [x1], #16
	stp	x27, x28, [x1], #16

	// |func| is required to preserve x29, the frame pointer. We cannot load
	// random values into x29 (see comment above), so compare it against the
	// expected value and zero the field of |state| if corrupted.
	// (x1 now points just past the x27/x28 slot of |state|.)
	mov	x9, sp
	cmp	x29, x9
	b.eq	Lx29_ok
	str	xzr, [x1]

Lx29_ok:
	// Restore callee-saved registers.
	ldp	d8, d9, [sp, #16]
	ldp	d10, d11, [sp, #32]
	ldp	d12, d13, [sp, #48]
	ldp	d14, d15, [sp, #64]
	ldp	x19, x20, [sp, #80]
	ldp	x21, x22, [sp, #96]
	ldp	x23, x24, [sp, #112]
	ldp	x25, x26, [sp, #128]
	ldp	x27, x28, [sp, #144]
	ldp	x29, x30, [sp], #176
	AARCH64_VALIDATE_LINK_REGISTER
	ret
// abi_test_clobber_x0 .. abi_test_clobber_x29: each function zeroes one
// general-purpose register and returns. NOTE(review): presumably used by
// the ABI test harness (see abi_test_trampoline above) to deliberately
// clobber registers — confirm against the harness's callers.
// x18 has no clobber function: it is skipped between x17 and x19 below
// (x18 is the AAPCS64 platform-reserved register).
.globl abi_test_clobber_x0
.align 4
abi_test_clobber_x0:
	AARCH64_VALID_CALL_TARGET
	mov	x0, xzr
	ret

.globl abi_test_clobber_x1
.align 4
abi_test_clobber_x1:
	AARCH64_VALID_CALL_TARGET
	mov	x1, xzr
	ret

.globl abi_test_clobber_x2
.align 4
abi_test_clobber_x2:
	AARCH64_VALID_CALL_TARGET
	mov	x2, xzr
	ret

.globl abi_test_clobber_x3
.align 4
abi_test_clobber_x3:
	AARCH64_VALID_CALL_TARGET
	mov	x3, xzr
	ret

.globl abi_test_clobber_x4
.align 4
abi_test_clobber_x4:
	AARCH64_VALID_CALL_TARGET
	mov	x4, xzr
	ret

.globl abi_test_clobber_x5
.align 4
abi_test_clobber_x5:
	AARCH64_VALID_CALL_TARGET
	mov	x5, xzr
	ret

.globl abi_test_clobber_x6
.align 4
abi_test_clobber_x6:
	AARCH64_VALID_CALL_TARGET
	mov	x6, xzr
	ret

.globl abi_test_clobber_x7
.align 4
abi_test_clobber_x7:
	AARCH64_VALID_CALL_TARGET
	mov	x7, xzr
	ret

.globl abi_test_clobber_x8
.align 4
abi_test_clobber_x8:
	AARCH64_VALID_CALL_TARGET
	mov	x8, xzr
	ret

.globl abi_test_clobber_x9
.align 4
abi_test_clobber_x9:
	AARCH64_VALID_CALL_TARGET
	mov	x9, xzr
	ret

.globl abi_test_clobber_x10
.align 4
abi_test_clobber_x10:
	AARCH64_VALID_CALL_TARGET
	mov	x10, xzr
	ret

.globl abi_test_clobber_x11
.align 4
abi_test_clobber_x11:
	AARCH64_VALID_CALL_TARGET
	mov	x11, xzr
	ret

.globl abi_test_clobber_x12
.align 4
abi_test_clobber_x12:
	AARCH64_VALID_CALL_TARGET
	mov	x12, xzr
	ret

.globl abi_test_clobber_x13
.align 4
abi_test_clobber_x13:
	AARCH64_VALID_CALL_TARGET
	mov	x13, xzr
	ret

.globl abi_test_clobber_x14
.align 4
abi_test_clobber_x14:
	AARCH64_VALID_CALL_TARGET
	mov	x14, xzr
	ret

.globl abi_test_clobber_x15
.align 4
abi_test_clobber_x15:
	AARCH64_VALID_CALL_TARGET
	mov	x15, xzr
	ret

.globl abi_test_clobber_x16
.align 4
abi_test_clobber_x16:
	AARCH64_VALID_CALL_TARGET
	mov	x16, xzr
	ret

.globl abi_test_clobber_x17
.align 4
abi_test_clobber_x17:
	AARCH64_VALID_CALL_TARGET
	mov	x17, xzr
	ret

// x18 intentionally absent (platform register).
.globl abi_test_clobber_x19
.align 4
abi_test_clobber_x19:
	AARCH64_VALID_CALL_TARGET
	mov	x19, xzr
	ret

.globl abi_test_clobber_x20
.align 4
abi_test_clobber_x20:
	AARCH64_VALID_CALL_TARGET
	mov	x20, xzr
	ret

.globl abi_test_clobber_x21
.align 4
abi_test_clobber_x21:
	AARCH64_VALID_CALL_TARGET
	mov	x21, xzr
	ret

.globl abi_test_clobber_x22
.align 4
abi_test_clobber_x22:
	AARCH64_VALID_CALL_TARGET
	mov	x22, xzr
	ret

.globl abi_test_clobber_x23
.align 4
abi_test_clobber_x23:
	AARCH64_VALID_CALL_TARGET
	mov	x23, xzr
	ret

.globl abi_test_clobber_x24
.align 4
abi_test_clobber_x24:
	AARCH64_VALID_CALL_TARGET
	mov	x24, xzr
	ret

.globl abi_test_clobber_x25
.align 4
abi_test_clobber_x25:
	AARCH64_VALID_CALL_TARGET
	mov	x25, xzr
	ret

.globl abi_test_clobber_x26
.align 4
abi_test_clobber_x26:
	AARCH64_VALID_CALL_TARGET
	mov	x26, xzr
	ret

.globl abi_test_clobber_x27
.align 4
abi_test_clobber_x27:
	AARCH64_VALID_CALL_TARGET
	mov	x27, xzr
	ret

.globl abi_test_clobber_x28
.align 4
abi_test_clobber_x28:
	AARCH64_VALID_CALL_TARGET
	mov	x28, xzr
	ret

// NOTE(review): this zeroes x29, the frame pointer — callers of this
// function must tolerate a cleared frame pointer.
.globl abi_test_clobber_x29
.align 4
abi_test_clobber_x29:
	AARCH64_VALID_CALL_TARGET
	mov	x29, xzr
	ret
// abi_test_clobber_d0 .. abi_test_clobber_d31: each function zeroes the
// low 64 bits of one SIMD/FP register and returns. NOTE(review):
// presumably used by the ABI test harness to clobber vector registers
// (d8-d15's low halves are callee-saved under AAPCS64) — confirm against
// the harness's callers.
.globl abi_test_clobber_d0
.align 4
abi_test_clobber_d0:
	AARCH64_VALID_CALL_TARGET
	fmov	d0, xzr
	ret

.globl abi_test_clobber_d1
.align 4
abi_test_clobber_d1:
	AARCH64_VALID_CALL_TARGET
	fmov	d1, xzr
	ret

.globl abi_test_clobber_d2
.align 4
abi_test_clobber_d2:
	AARCH64_VALID_CALL_TARGET
	fmov	d2, xzr
	ret

.globl abi_test_clobber_d3
.align 4
abi_test_clobber_d3:
	AARCH64_VALID_CALL_TARGET
	fmov	d3, xzr
	ret

.globl abi_test_clobber_d4
.align 4
abi_test_clobber_d4:
	AARCH64_VALID_CALL_TARGET
	fmov	d4, xzr
	ret

.globl abi_test_clobber_d5
.align 4
abi_test_clobber_d5:
	AARCH64_VALID_CALL_TARGET
	fmov	d5, xzr
	ret

.globl abi_test_clobber_d6
.align 4
abi_test_clobber_d6:
	AARCH64_VALID_CALL_TARGET
	fmov	d6, xzr
	ret

.globl abi_test_clobber_d7
.align 4
abi_test_clobber_d7:
	AARCH64_VALID_CALL_TARGET
	fmov	d7, xzr
	ret

.globl abi_test_clobber_d8
.align 4
abi_test_clobber_d8:
	AARCH64_VALID_CALL_TARGET
	fmov	d8, xzr
	ret

.globl abi_test_clobber_d9
.align 4
abi_test_clobber_d9:
	AARCH64_VALID_CALL_TARGET
	fmov	d9, xzr
	ret

.globl abi_test_clobber_d10
.align 4
abi_test_clobber_d10:
	AARCH64_VALID_CALL_TARGET
	fmov	d10, xzr
	ret

.globl abi_test_clobber_d11
.align 4
abi_test_clobber_d11:
	AARCH64_VALID_CALL_TARGET
	fmov	d11, xzr
	ret

.globl abi_test_clobber_d12
.align 4
abi_test_clobber_d12:
	AARCH64_VALID_CALL_TARGET
	fmov	d12, xzr
	ret

.globl abi_test_clobber_d13
.align 4
abi_test_clobber_d13:
	AARCH64_VALID_CALL_TARGET
	fmov	d13, xzr
	ret

.globl abi_test_clobber_d14
.align 4
abi_test_clobber_d14:
	AARCH64_VALID_CALL_TARGET
	fmov	d14, xzr
	ret

.globl abi_test_clobber_d15
.align 4
abi_test_clobber_d15:
	AARCH64_VALID_CALL_TARGET
	fmov	d15, xzr
	ret

.globl abi_test_clobber_d16
.align 4
abi_test_clobber_d16:
	AARCH64_VALID_CALL_TARGET
	fmov	d16, xzr
	ret

.globl abi_test_clobber_d17
.align 4
abi_test_clobber_d17:
	AARCH64_VALID_CALL_TARGET
	fmov	d17, xzr
	ret

.globl abi_test_clobber_d18
.align 4
abi_test_clobber_d18:
	AARCH64_VALID_CALL_TARGET
	fmov	d18, xzr
	ret

.globl abi_test_clobber_d19
.align 4
abi_test_clobber_d19:
	AARCH64_VALID_CALL_TARGET
	fmov	d19, xzr
	ret

.globl abi_test_clobber_d20
.align 4
abi_test_clobber_d20:
	AARCH64_VALID_CALL_TARGET
	fmov	d20, xzr
	ret

.globl abi_test_clobber_d21
.align 4
abi_test_clobber_d21:
	AARCH64_VALID_CALL_TARGET
	fmov	d21, xzr
	ret

.globl abi_test_clobber_d22
.align 4
abi_test_clobber_d22:
	AARCH64_VALID_CALL_TARGET
	fmov	d22, xzr
	ret

.globl abi_test_clobber_d23
.align 4
abi_test_clobber_d23:
	AARCH64_VALID_CALL_TARGET
	fmov	d23, xzr
	ret

.globl abi_test_clobber_d24
.align 4
abi_test_clobber_d24:
	AARCH64_VALID_CALL_TARGET
	fmov	d24, xzr
	ret

.globl abi_test_clobber_d25
.align 4
abi_test_clobber_d25:
	AARCH64_VALID_CALL_TARGET
	fmov	d25, xzr
	ret

.globl abi_test_clobber_d26
.align 4
abi_test_clobber_d26:
	AARCH64_VALID_CALL_TARGET
	fmov	d26, xzr
	ret

.globl abi_test_clobber_d27
.align 4
abi_test_clobber_d27:
	AARCH64_VALID_CALL_TARGET
	fmov	d27, xzr
	ret

.globl abi_test_clobber_d28
.align 4
abi_test_clobber_d28:
	AARCH64_VALID_CALL_TARGET
	fmov	d28, xzr
	ret

.globl abi_test_clobber_d29
.align 4
abi_test_clobber_d29:
	AARCH64_VALID_CALL_TARGET
	fmov	d29, xzr
	ret

.globl abi_test_clobber_d30
.align 4
abi_test_clobber_d30:
	AARCH64_VALID_CALL_TARGET
	fmov	d30, xzr
	ret

.globl abi_test_clobber_d31
.align 4
abi_test_clobber_d31:
	AARCH64_VALID_CALL_TARGET
	fmov	d31, xzr
	ret
// abi_test_clobber_v8_upper .. abi_test_clobber_v15_upper: each function
// zeroes only the UPPER 64 bits (element d[1]) of one vector register,
// leaving the low 64 bits untouched. Only v8-v15 get these variants.
// NOTE(review): presumably because under AAPCS64 only the low 64 bits of
// v8-v15 are callee-saved, so the upper halves may be freely clobbered —
// confirm against the ABI test harness.
.globl abi_test_clobber_v8_upper
.align 4
abi_test_clobber_v8_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v8.d[1], xzr
	ret

.globl abi_test_clobber_v9_upper
.align 4
abi_test_clobber_v9_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v9.d[1], xzr
	ret

.globl abi_test_clobber_v10_upper
.align 4
abi_test_clobber_v10_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v10.d[1], xzr
	ret

.globl abi_test_clobber_v11_upper
.align 4
abi_test_clobber_v11_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v11.d[1], xzr
	ret

.globl abi_test_clobber_v12_upper
.align 4
abi_test_clobber_v12_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v12.d[1], xzr
	ret

.globl abi_test_clobber_v13_upper
.align 4
abi_test_clobber_v13_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v13.d[1], xzr
	ret

.globl abi_test_clobber_v14_upper
.align 4
abi_test_clobber_v14_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v14.d[1], xzr
	ret

.globl abi_test_clobber_v15_upper
.align 4
abi_test_clobber_v15_upper:
	AARCH64_VALID_CALL_TARGET
	fmov	v15.d[1], xzr
	ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
weix2025/toy
| 34,147
|
deps/boringssl/win-aarch64/crypto/fipsmodule/sha256-armv8-win.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
#endif
.text
.globl sha256_block_data_order
.def sha256_block_data_order
.type 32
.endef
.align 6
sha256_block_data_order:
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x16,OPENSSL_armcap_P
#endif
ldr w16,[x16,:lo12:OPENSSL_armcap_P]
tst w16,#ARMV8_SHA256
b.ne Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256
add x30,x30,:lo12:LK256
stp x0,x2,[x29,#96]
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
// SHA-256 round constants K[0..63] (FIPS 180-4 §4.2.2), consumed sequentially
// via post-incremented loads from x30 (scalar path) / x3 (sha256_block_armv8).
// The trailing zero word is a sentinel: the scalar loop's `cbnz w19,Loop_16_xx`
// reads it as the "next K" and falls out of the 16-round block when it hits 0.
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
// ASCII identification string: "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
// sha256_block_armv8 — SHA-256 compression using the Armv8 Cryptography
// Extension (SHA256H/SHA256H2/SHA256SU0/SHA256SU1).
//   x0 = pointer to the 8-word (32-byte) hash state, loaded into v0/v1 and
//        stored back on exit
//   x1 = input pointer, advanced 64 bytes per block
//   x2 = number of 64-byte blocks to process (decremented to zero)
// Clobbers: x2, x3, v0-v7, v16-v19; x29/x30 saved in the prologue frame.
// The SHA instructions are emitted as raw `.long` encodings (decodings shown
// in the trailing comments) so the file assembles even when the assembler
// lacks +sha2 support. Generated code — do not edit by hand.
.def sha256_block_armv8
.type 32
.endef
.align 6
sha256_block_armv8:
Lv8_entry:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256
add x3,x3,:lo12:LK256
// Per-block loop: v4-v7 hold the message schedule (byte-swapped to big-endian
// word order), v16/v17 alternate as the "schedule word + K" operand, and
// v18/v19 keep a copy of the incoming state for the final feed-forward add.
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
// Final 16 rounds: no more schedule updates (sha256su0/su1), only the
// round function itself over the last four K blocks.
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
// Davies-Meyer feed-forward: add the saved incoming state (v18/v19).
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ---------------------------------------------------------------------------
// (dataset concatenation boundary — repo: weix2025/toy, 14,519 bytes)
// begin file: deps/boringssl/win-aarch64/crypto/fipsmodule/ghashv8-armv8-win.S
// ---------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_v8
// gcm_init_v8 — precompute the GHASH key table from the hash subkey H.
//   x0 = output Htable; six 128-bit entries are stored:
//        Htable[0] = "twisted" H (H<<1 mod the GCM polynomial, bit-reflected
//        convention), Htable[1] = packed Karatsuba pre-processed halves,
//        Htable[2] = H^2, Htable[3] = H^3, Htable[4] = packed pre-processed,
//        Htable[5] = H^4
//   x1 = input H (one 128-bit value)
// v19 holds the reduction constant 0xc2...<<57 used for the two-phase
// polynomial reduction throughout this file. Generated code — do not edit.
.def gcm_init_v8
.type 32
.endef
.align 4
gcm_init_v8:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.globl gcm_gmult_v8
// gcm_gmult_v8 — one GHASH multiplication: Xi = (Xi * H) mod P, using the
// PMULL/PMULL2 polynomial multiplier with one Karatsuba step and the
// two-phase reduction by the 0xc2...<<57 constant (v19).
//   x0 = Xi, 16 bytes, read and written in place (byte-reversed on
//        little-endian so the math runs in the bit-reflected convention)
//   x1 = Htable from gcm_init_v8; only Htable[0] (twisted H, v20) and
//        Htable[1] (pre-processed halves, v21) are loaded
// Generated code — do not edit by hand.
.def gcm_gmult_v8
.type 32
.endef
.align 4
gcm_gmult_v8:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.globl gcm_ghash_v8
// gcm_ghash_v8 — fold `len` bytes of input into the GHASH accumulator Xi.
//   x0 = Xi, 16 bytes, read and written in place
//   x1 = Htable from gcm_init_v8 (twisted H, pre-processed halves, H^2, ...)
//   x2 = input pointer
//   x3 = length in bytes; len >= 64 is dispatched to the 4-blocks-at-a-time
//        path (gcm_ghash_v8_4x); otherwise a modulo-scheduled 2-block loop
//        plus a 1-block odd tail handles it.
// len is presumably a multiple of the 16-byte block size — TODO confirm
// against the C caller. Generated code — do not edit by hand.
.def gcm_ghash_v8
.type 32
.endef
.align 4
gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET
cmp x3,#64
b.hs Lgcm_ghash_v8_4x
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
//to be rotated in order to
//make it appear as in
//algorithm specification
subs x3,x3,#32 //see if x3 is 32 or larger
mov x12,#16 //x12 is used as post-
//increment for input pointer;
//as loop is modulo-scheduled
//x12 is zeroed just in time
//to preclude overstepping
//inp[len], which means that
//last block[s] are actually
//loaded twice, but last
//copy is not processed
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v22.2d},[x1]
csel x12,xzr,x12,eq //is it time to zero x12?
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
rev64 v0.16b,v0.16b
#endif
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
b.lo Lodd_tail_v8 //x3 was less than 32
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d
b Loop_mod2x_v8
// Main loop: processes two blocks per iteration by multiplying the running
// value by H^2 while the "odd" block is multiplied by H, interleaving the
// next pair's loads/byte-swaps with the current reduction (modulo-scheduled).
.align 4
Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b
csel x12,xzr,x12,eq //is it time to zero x12?
eor v1.16b,v1.16b,v5.16b
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
#endif
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v3.16b,v3.16b,v18.16b
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
eor v3.16b,v3.16b,v0.16b
pmull2 v6.1q,v20.2d,v7.2d
b.hs Loop_mod2x_v8 //there was at least 32 more bytes
eor v2.16b,v2.16b,v18.16b
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
adds x3,x3,#32 //re-construct x3
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
b.eq Ldone_v8 //is x3 zero?
// One remaining 16-byte block: a single (Xi^inp)*H multiply-and-reduce.
Lodd_tail_v8:
ext v18.16b,v0.16b,v0.16b,#8
eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
Ldone_v8:
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
// gcm_ghash_v8_4x — 4-blocks-per-iteration GHASH, reached from gcm_ghash_v8
// when len >= 64. Same register arguments as gcm_ghash_v8:
//   x0 = Xi (read/written), x1 = Htable, x2 = input, x3 = length in bytes.
// Uses Htable[0..5]: twisted H (v20), pre-processed halves (v21), H^2 (v22),
// H^3 (v26), pre-processed halves (v27), H^4 (v28). Each Loop4x iteration
// computes (Xi+I0)*H^4 + I1*H^3 + I2*H^2 + I3*H with one shared reduction;
// Lone/Ltwo/Lthree handle a 1-3 block remainder, Ldone4x does the final
// reduction and writes Xi back. Generated code — do not edit by hand.
.def gcm_ghash_v8_4x
.type 32
.endef
.align 4
gcm_ghash_v8_4x:
Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
// Pre-compute the partial products of blocks I1..I3 (v29/v31/v30 accumulate
// the lo/hi/mid halves) before entering the loop; I0 is folded in with Xi.
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo Ltail4x
b Loop4x
.align 4
Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs Loop4x
// Tail: fold the last pre-loaded group of four blocks, then dispatch on the
// number of blocks (0-3) still unprocessed.
Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq Ldone4x
cmp x3,#32
b.lo Lone
b.eq Ltwo
Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
// Final Karatsuba recombination + reduction of the accumulated product,
// then byte-swap/rotate Xi back to memory order and store.
Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
weix2025/toy
| 16,139
|
deps/boringssl/win-aarch64/crypto/fipsmodule/aesv8-armv8-win.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
// Constant tables shared by the AES key-schedule code below, addressed
// via adrp/add(:lo12:) in aes_hw_set_encrypt_key.
.section .rodata
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01 // initial round constant, splatted to all 4 lanes
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b // rcon continuation row, reloaded once shl #1 would wrap
.text
//-----------------------------------------------------------------------
// int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key)
// In:   x0 = user_key bytes, w1 = key length in bits (128/192/256),
//       x2 = output key schedule
// Out:  x0 = 0 on success, -1 on NULL argument, -2 on bad bit length.
// Writes the expanded round keys to [x2] and stores the round count
// (10/12/14 in w12) at the end of the schedule (see Ldone).
//-----------------------------------------------------------------------
.globl aes_hw_set_encrypt_key
.def aes_hw_set_encrypt_key
.type 32
.endef
.align 5
aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-1 // pending return value: -1 (NULL argument)
cmp x0,#0 // reject NULL user_key
b.eq Lenc_key_abort
cmp x2,#0 // reject NULL key-schedule pointer
b.eq Lenc_key_abort
mov x3,#-2 // pending return value: -2 (bad bit length)
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f // bits must be a multiple of 64
b.ne Lenc_key_abort
adrp x3,Lrcon // x3 = &Lrcon (rcon + rotate-n-splat tables)
add x3,x3,:lo12:Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b // v0 = 0: aese with a zero key gives SubBytes(ShiftRows(.))
ld1 {v3.16b},[x0],#16 // first 16 key bytes
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32 // v1 = rcon splat, v2 = rotate-n-splat tbl mask
b.lt Loop128 // 128-bit key
b.eq L192 // 192-bit key
b L256 // 256-bit key
.align 4
// AES-128 expansion: one 16-byte round key per iteration.  tbl+aese(v0)
// implement RotWord+SubWord; the ext/eor chain is the word recurrence.
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16 // emit current round key
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b // mix in round constant
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1 // next rcon = rcon << 1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3] // rcon continuation row (0x1b)
// Final two rounds, unrolled; same recurrence as Loop128.
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2] // final round key
add x2,x2,#0x50
mov w12,#10 // 10 rounds for AES-128
b Ldone
.align 4
// AES-192 expansion: the key arrives as 16+8 bytes; round keys are
// emitted in mixed 16/8-byte pieces.
L192:
ld1 {v4.8b},[x0],#8 // remaining 8 key bytes
movi v6.16b,#8 // borrow v6.16b
st1 {v3.4s},[x2],#16
sub v2.16b,v2.16b,v6.16b // adjust the mask
Loop192:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.8b},[x2],#8
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
dup v5.4s,v3.s[3]
eor v5.16b,v5.16b,v4.16b
eor v6.16b,v6.16b,v1.16b // mix in round constant
ext v4.16b,v0.16b,v4.16b,#12
shl v1.16b,v1.16b,#1
eor v4.16b,v4.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
eor v4.16b,v4.16b,v6.16b
st1 {v3.4s},[x2],#16
b.ne Loop192
mov w12,#12 // 12 rounds for AES-192
add x2,x2,#0x20
b Ldone
.align 4
// AES-256 expansion: alternates an rcon round on the first key half
// with a SubWord-only (splat, no rotate/rcon) round on the second half.
L256:
ld1 {v4.16b},[x0] // second 16 key bytes
mov w1,#7
mov w12,#14 // 14 rounds for AES-256
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2] // store round count at the end of the schedule
mov x3,#0 // success
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
//-----------------------------------------------------------------------
// int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, AES_KEY *key)
// Builds the encryption schedule via Lenc_key, then converts it in place
// for the aesd/aesimc datapath: the round keys are reversed in order and
// every inner key is run through aesimc (InvMixColumns).
// Out: x0 = 0 on success, otherwise the Lenc_key error code.
//-----------------------------------------------------------------------
.globl aes_hw_set_decrypt_key
.def aes_hw_set_decrypt_key
.type 32
.endef
.align 5
aes_hw_set_decrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
bl Lenc_key
cmp x0,#0 // propagate key-setup failure
b.ne Ldec_key_abort
sub x2,x2,#240 // restore original x2
mov x4,#-16 // backwards stride for the high pointer
add x0,x2,x12,lsl#4 // end of key schedule
// Swap first and last round keys; the outer pair is not aesimc'd.
ld1 {v0.4s},[x2]
ld1 {v1.4s},[x0]
st1 {v0.4s},[x0],x4
st1 {v1.4s},[x2],#16
// Walk inwards from both ends, swapping and applying aesimc.
Loop_imc:
ld1 {v0.4s},[x2]
ld1 {v1.4s},[x0]
aesimc v0.16b,v0.16b
aesimc v1.16b,v1.16b
st1 {v0.4s},[x0],x4
st1 {v1.4s},[x2],#16
cmp x0,x2
b.hi Loop_imc
// Pointers have met: aesimc the middle round key in place.
ld1 {v0.4s},[x2]
aesimc v0.16b,v0.16b
st1 {v0.4s},[x0]
eor x0,x0,x0 // return value
Ldec_key_abort:
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
//-----------------------------------------------------------------------
// void aes_hw_encrypt(const uint8_t in[16], uint8_t out[16], const AES_KEY *key)
// Single-block AES encryption.  In: x0 = in, x1 = out, x2 = key schedule;
// the round count is read from [x2,#240].
//-----------------------------------------------------------------------
.globl aes_hw_encrypt
.def aes_hw_encrypt
.type 32
.endef
.align 5
aes_hw_encrypt:
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240] // round count
ld1 {v0.4s},[x2],#16 // rndkey[0]
ld1 {v2.16b},[x0] // plaintext block
sub w3,w3,#2 // loop handles two rounds per iteration
ld1 {v1.4s},[x2],#16 // rndkey[1]
Loop_enc:
aese v2.16b,v0.16b // AddRoundKey + SubBytes + ShiftRows
aesmc v2.16b,v2.16b // MixColumns
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aese v2.16b,v1.16b
aesmc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_enc
// Last two rounds: the final aese has no MixColumns; finish by xoring
// the last round key.
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2] // last round key
aese v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1] // write ciphertext
ret
//-----------------------------------------------------------------------
// void aes_hw_decrypt(const uint8_t in[16], uint8_t out[16], const AES_KEY *key)
// Single-block AES decryption (equivalent-inverse-cipher schedule, see
// aes_hw_set_decrypt_key).  In: x0 = in, x1 = out, x2 = key schedule;
// the round count is read from [x2,#240].
//-----------------------------------------------------------------------
.globl aes_hw_decrypt
.def aes_hw_decrypt
.type 32
.endef
.align 5
aes_hw_decrypt:
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240] // round count
ld1 {v0.4s},[x2],#16 // rndkey[0]
ld1 {v2.16b},[x0] // ciphertext block
sub w3,w3,#2 // loop handles two rounds per iteration
ld1 {v1.4s},[x2],#16 // rndkey[1]
Loop_dec:
aesd v2.16b,v0.16b // AddRoundKey + InvSubBytes + InvShiftRows
aesimc v2.16b,v2.16b // InvMixColumns
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aesd v2.16b,v1.16b
aesimc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_dec
// Last two rounds: the final aesd has no InvMixColumns; finish by
// xoring the last round key.
aesd v2.16b,v0.16b
aesimc v2.16b,v2.16b
ld1 {v0.4s},[x2] // last round key
aesd v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1] // write plaintext
ret
//-----------------------------------------------------------------------
// void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
//                         const AES_KEY *key, uint8_t ivec[16], int enc)
// In: x0 = in, x1 = out, x2 = len (bytes; rounded down to whole blocks),
//     x3 = key schedule, x4 = ivec (updated on exit), w5 = enc flag.
// Encryption is inherently serial; decryption runs three blocks in
// parallel (blocks are independent once the previous ciphertext is kept).
//-----------------------------------------------------------------------
.globl aes_hw_cbc_encrypt
.def aes_hw_cbc_encrypt
.type 32
.endef
.align 5
aes_hw_cbc_encrypt:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
subs x2,x2,#16
mov x8,#16 // input stride; zeroed on the last block
b.lo Lcbc_abort
csel x8,xzr,x8,eq
cmp w5,#0 // en- or decrypting?
ldr w5,[x3,#240] // w5 now = round count
and x2,x2,#-16 // whole blocks only
ld1 {v6.16b},[x4] // v6 = IV / chaining value
ld1 {v0.16b},[x0],x8 // first input block
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#6
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
sub w5,w5,#2
ld1 {v18.4s,v19.4s},[x7],#32
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7] // v7 = last round key
add x7,x3,#32
mov w6,w5
b.eq Lcbc_dec
// ---------------- CBC encryption path ----------------
cmp w5,#2
eor v0.16b,v0.16b,v6.16b // first block ^ IV
eor v5.16b,v16.16b,v7.16b // v5 = rndkey[0] ^ last round key (folded for chaining)
b.eq Lcbc_enc128
ld1 {v2.4s,v3.4s},[x7]
add x7,x3,#16
add x6,x3,#16*4
add x12,x3,#16*5
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
add x14,x3,#16*6
add x3,x3,#16*7
b Lenter_cbc_enc
.align 4
Loop_cbc_enc:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
st1 {v6.16b},[x1],#16 // store previous ciphertext block
Lenter_cbc_enc:
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v0.16b,v2.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s},[x6]
cmp w5,#4
aese v0.16b,v3.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x12]
b.eq Lcbc_enc192
// AES-256 only: two extra rounds.
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s},[x14]
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x3]
nop
Lcbc_enc192:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
subs x2,x2,#16
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
csel x8,xzr,x8,eq // stop advancing input on the last block
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
ld1 {v16.16b},[x0],x8 // next plaintext block
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
eor v16.16b,v16.16b,v5.16b // next plaintext ^ (rndkey[0] ^ last key)
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x7] // re-pre-load rndkey[1]
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v0.16b,v23.16b
eor v6.16b,v0.16b,v7.16b // v6 = ciphertext, also next chaining value
b.hs Loop_cbc_enc
st1 {v6.16b},[x1],#16 // final ciphertext block
b Lcbc_done
.align 5
// AES-128 encryption specialization: all round keys stay in registers.
Lcbc_enc128:
ld1 {v2.4s,v3.4s},[x7]
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
b Lenter_cbc_enc128
Loop_cbc_enc128:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
st1 {v6.16b},[x1],#16 // store previous ciphertext block
Lenter_cbc_enc128:
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
subs x2,x2,#16
aese v0.16b,v2.16b
aesmc v0.16b,v0.16b
csel x8,xzr,x8,eq
aese v0.16b,v3.16b
aesmc v0.16b,v0.16b
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
ld1 {v16.16b},[x0],x8
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
eor v16.16b,v16.16b,v5.16b
aese v0.16b,v23.16b
eor v6.16b,v0.16b,v7.16b
b.hs Loop_cbc_enc128
st1 {v6.16b},[x1],#16
b Lcbc_done
.align 5
// ---------------- CBC decryption path: 3 blocks in parallel ----------------
Lcbc_dec:
ld1 {v18.16b},[x0],#16
subs x2,x2,#32 // bias
add w6,w5,#2
orr v3.16b,v0.16b,v0.16b // keep ciphertext copies for the chaining xor
orr v1.16b,v0.16b,v0.16b
orr v19.16b,v18.16b,v18.16b
b.lo Lcbc_dec_tail
orr v1.16b,v18.16b,v18.16b
ld1 {v18.16b},[x0],#16
orr v2.16b,v0.16b,v0.16b
orr v3.16b,v1.16b,v1.16b
orr v19.16b,v18.16b,v18.16b
// Main loop: decrypt v0, v1, v18 (three blocks) in lock-step.
Loop3x_cbc_dec:
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_cbc_dec
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
eor v4.16b,v6.16b,v7.16b // prev ciphertext/IV ^ last round key
subs x2,x2,#0x30
eor v5.16b,v2.16b,v7.16b
csel x6,x2,x6,lo // x6, w6, is zero at this point
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
eor v17.16b,v3.16b,v7.16b
add x0,x0,x6 // x0 is adjusted in such way that
// at exit from the loop v1.16b-v18.16b
// are loaded with last "words"
orr v6.16b,v19.16b,v19.16b // new chaining value = last ciphertext block
mov x7,x3
aesd v0.16b,v20.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v20.16b
aesimc v18.16b,v18.16b
ld1 {v2.16b},[x0],#16
aesd v0.16b,v21.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v21.16b
aesimc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
aesd v0.16b,v22.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v22.16b
aesimc v18.16b,v18.16b
ld1 {v19.16b},[x0],#16
aesd v0.16b,v23.16b
aesd v1.16b,v23.16b
aesd v18.16b,v23.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
add w6,w5,#2
eor v4.16b,v4.16b,v0.16b // plaintext = state ^ last key ^ prev ciphertext
eor v5.16b,v5.16b,v1.16b
eor v18.16b,v18.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v4.16b},[x1],#16
orr v0.16b,v2.16b,v2.16b
st1 {v5.16b},[x1],#16
orr v1.16b,v3.16b,v3.16b
st1 {v18.16b},[x1],#16
orr v18.16b,v19.16b,v19.16b
b.hs Loop3x_cbc_dec
cmn x2,#0x30 // nothing left over?
b.eq Lcbc_done
nop
// Tail: one or two remaining blocks, carried in v1 and v18.
Lcbc_dec_tail:
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Lcbc_dec_tail
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v20.16b
aesimc v18.16b,v18.16b
cmn x2,#0x20 // one block (eq) or two?
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v21.16b
aesimc v18.16b,v18.16b
eor v5.16b,v6.16b,v7.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v22.16b
aesimc v18.16b,v18.16b
eor v17.16b,v3.16b,v7.16b
aesd v1.16b,v23.16b
aesd v18.16b,v23.16b
b.eq Lcbc_dec_one
eor v5.16b,v5.16b,v1.16b
eor v17.16b,v17.16b,v18.16b
orr v6.16b,v19.16b,v19.16b
st1 {v5.16b},[x1],#16
st1 {v17.16b},[x1],#16
b Lcbc_done
Lcbc_dec_one:
eor v5.16b,v5.16b,v18.16b
orr v6.16b,v19.16b,v19.16b
st1 {v5.16b},[x1],#16
Lcbc_done:
st1 {v6.16b},[x4] // write back chaining value (IV for the next call)
Lcbc_abort:
ldr x29,[sp],#16
ret
//-----------------------------------------------------------------------
// void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
//                                  size_t blocks, const AES_KEY *key,
//                                  const uint8_t ivec[16])
// In: x0 = in, x1 = out, x2 = number of 16-byte blocks, x3 = key
//     schedule, x4 = ivec; the low 32 bits of the counter live in the
//     big-endian word at ivec[12..15] (w8).
// Main loop keystreams three counter blocks per iteration; the tail
// handles the final one or two blocks.
//-----------------------------------------------------------------------
.globl aes_hw_ctr32_encrypt_blocks
.def aes_hw_ctr32_encrypt_blocks
.type 32
.endef
.align 5
aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240] // round count
ldr w8, [x4, #12] // big-endian 32-bit counter word
ld1 {v0.4s},[x4] // v0 = initial counter block
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16 // input stride; zeroed for the short tail
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7] // last round key
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The vmov lines
// could write to v1.16b and v18.16b directly, but that trips this bug.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8 // counter arithmetic is done host-endian
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b // v1 = counter+1
b.ls Lctr32_tail // at most two blocks
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b // v18 = counter+2
b Loop3x_ctr32
.align 4
// Main loop: encrypt counter blocks v0/v1/v18 and xor into the input.
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
// Final rounds; meanwhile load the three input blocks and prepare the
// next three counter values (through v6, per the erratum note above).
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b // fold last round key into the input
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b // ciphertext = input ^ keystream
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3 // undo bias; how many blocks remain?
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq // do not read a second block if only one remains
// Tail: encrypt counter blocks v0 (and v1 if two blocks remain).
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
weix2025/toy
| 31,029
|
deps/boringssl/win-aarch64/crypto/fipsmodule/armv8-mont-win.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
//-----------------------------------------------------------------------
// int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
//                 const BN_ULONG *np, const BN_ULONG *n0, int num)
// Montgomery multiplication rp = ap*bp / 2^(64*num) mod np.
// In: x0 = rp, x1 = ap, x2 = bp, x3 = np, x4 = &n0, x5 = num (words).
// Dispatches to the 8x squaring / 4x multiplication variants when num
// is a multiple of 8 or 4; otherwise runs the generic word-by-word
// loop below, with the temporary tp[] allocated on the stack.
// Returns 1 in x0.
//-----------------------------------------------------------------------
.globl bn_mul_mont
.def bn_mul_mont
.type 32
.endef
.align 5
bn_mul_mont:
AARCH64_SIGN_LINK_REGISTER
tst x5,#7 // num % 8 == 0 -> squaring-capable 8x path
b.eq __bn_sqr8x_mont
tst x5,#3 // num % 4 == 0 -> 4x path
b.eq __bn_mul4x_mont
Lmul_mont:
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3 // room for num words of tp[]
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3 // x5 = num in bytes from here on
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) As for removal of first multiplication and addition
// instructions. The outcome of first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or not. Then
// question is when does it carry? Is there alternative
// way to deduce it? If you follow operations, you can
// observe that condition for carry is quite simple:
// x6 being non-zero. So that carry can be calculated
// by adding -1 to x6. That's what next instruction does.
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
// First outer iteration (i = 0): fill tp[] with ap[]*bp[0] plus the
// Montgomery reduction term np[]*m1.
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
// Remaining outer iterations: accumulate ap[]*bp[i] into tp[] and
// reduce each row by np[]*m1.
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4 // m1 = tp[0]*n0
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
// Constant-flow conditional copy: keep tp[] (borrowed) or the
// subtracted value already in rp[], wiping tp[] as we go.
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
// Epilogue: restore callee-saved registers and return 1.
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.def __bn_sqr8x_mont
.type 32
.endef
.align 5
__bn_sqr8x_mont:
// Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to
// only from bn_mul_mont which has already signed the return address.
cmp x1,x2
b.ne __bn_mul4x_mont
Lsqr8x_mont:
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewinded ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewinded np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-7,x28 hold result, x6-7 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-7 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.def __bn_mul4x_mont
.type 32
.endef
.align 5
// __bn_mul4x_mont — Montgomery multiplication, 4-word-column variant.
// Register inputs (per the inline comments below and the offload stores):
//   x0 = rp (result pointer), x1 = ap, x2 = bp, x3 = np (modulus),
//   x4 = &n0 (Montgomery constant, dereferenced below), x5 = num (words).
// Scratch t[] lives on an alloca'd region below sp; x29 is the frame anchor.
__bn_mul4x_mont:
// Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to
// only from bn_mul_mont or __bn_mul8x_mont which have already signed the
// return address.
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3                  // x5 = num * 8 (byte length) from here on
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5                 // x27 = &a[num], loop-termination sentinel
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr                   // t[0..3] accumulators start at zero
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr                    // x0 = running top-word carry
mov x28,#0                    // x28 = byte index into b[], kept mod 32 below
mov x26,sp                    // x26 = t[] write cursor
// First pass: multiply a[0..3] by four successive b[i] and fold in the
// Montgomery reduction of the bottom column.  x28 cycles 8,16,24,0 via the
// and #31, so after four iterations it is zero and the loop exits.
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
// The commented-out adds is replaced by subs: by construction of n0,
// x19 + lo(n[0]*t[0]*n0) == 0 mod 2^64, so its carry-out equals
// (x19 != 0), which is exactly what "subs xzr,x19,#1" produces.
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1                // x10 = bytes of a[] left; 0 => num == 4
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
// num > 4: continue the first pass over the upper words a[4..7]/n[4..7],
// reusing the t[0]*n0 values stashed on the stack above.
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewinded x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
// Outer-loop advance: move to the next group of four b words, rewind the
// a/n pointers, and spill/reload the t[] window before re-entering the
// reduction loop.
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr                // bank the top-most carry in x30 across passes
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
// Same structure as Loop_mul4x_1st_reduction, but accumulating into the
// previously computed t[] window instead of a zeroed one.
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
// Carry trick: x19 + lo(n[0]*t[0]*n0) == 0 mod 2^64 by choice of n0,
// so "subs xzr,x19,#1" reproduces the carry of the commented-out adds.
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
// Tail of an outer-loop pass: walk the remaining a[4..]/n[4..] words,
// consuming the t[0]*n0 values saved by the reduction loop.
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewinded np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
// More upper words remain: fold in the next t[] chunk and loop again.
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
// End of one outer-loop pass: add the banked carry (x30), store the top
// t[] words, and either fall through to the final subtraction (all of b
// consumed) or restart the reduction with the next b group.
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
// Subtraction loop: t[] - n[], four words per iteration; cbnz/sub are used
// for loop control because they do not disturb the borrow in the C flag.
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
// Constant-time select: "lo" (borrow occurred) keeps the pre-subtraction
// value; otherwise the subtracted value wins.  Scratch t[] is wiped with
// xzr stores as we go.
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
// Short path for num == 4: the whole result fits in x19..x22, so the
// conditional subtraction is done inline without the sub/copy loops.
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-3,x0 hold result, x14-7 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-3 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
// Common epilogue: restore callee-saved x19-x28, unwind the 128-byte frame.
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
weix2025/toy
| 1,923
|
deps/boringssl/win-aarch64/crypto/fipsmodule/bn-armv8-win.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
// BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
// size_t num);
.globl bn_add_words
.align 4
// bn_add_words: rp[i] = ap[i] + bp[i] with carry propagation.
// In:  x0 = rp, x1 = ap, x2 = bp, x3 = num (64-bit words).
// Out: x0 = final carry (0 or 1).
bn_add_words:
AARCH64_VALID_CALL_TARGET
# Clear the carry flag.
cmn xzr, xzr
# aarch64 can load two registers at a time, so we do two loop iterations at
# at a time. Split x3 = 2 * x8 + x3. This allows loop
# operations to use CBNZ without clobbering the carry flag.
lsr x8, x3, #1
and x3, x3, #1
cbz x8, Ladd_tail
Ladd_loop:
ldp x4, x5, [x1], #16
ldp x6, x7, [x2], #16
sub x8, x8, #1                          # sub/cbnz leave the C flag intact
adcs x4, x4, x6
adcs x5, x5, x7
stp x4, x5, [x0], #16
cbnz x8, Ladd_loop
Ladd_tail:
cbz x3, Ladd_exit
ldr x4, [x1], #8
ldr x6, [x2], #8
adcs x4, x4, x6
str x4, [x0], #8
Ladd_exit:
cset x0, cs                             # return 1 iff the last adcs carried out
ret
// BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
// size_t num);
.globl bn_sub_words
.align 4
// bn_sub_words: rp[i] = ap[i] - bp[i] with borrow propagation.
// In:  x0 = rp, x1 = ap, x2 = bp, x3 = num (64-bit words).
// Out: x0 = final borrow (0 or 1).
bn_sub_words:
AARCH64_VALID_CALL_TARGET
# Set the carry flag. Arm's borrow bit is flipped from the carry flag,
# so we want C = 1 here.
cmp xzr, xzr
# aarch64 can load two registers at a time, so we do two loop iterations at
# at a time. Split x3 = 2 * x8 + x3. This allows loop
# operations to use CBNZ without clobbering the carry flag.
lsr x8, x3, #1
and x3, x3, #1
cbz x8, Lsub_tail
Lsub_loop:
ldp x4, x5, [x1], #16
ldp x6, x7, [x2], #16
sub x8, x8, #1                          # sub/cbnz leave the C flag intact
sbcs x4, x4, x6
sbcs x5, x5, x7
stp x4, x5, [x0], #16
cbnz x8, Lsub_loop
Lsub_tail:
cbz x3, Lsub_exit
ldr x4, [x1], #8
ldr x6, [x2], #8
sbcs x4, x4, x6
str x4, [x0], #8
Lsub_exit:
cset x0, cc                             # C clear after sbcs means a borrow occurred
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.